2024-11-26 10:26:08,586 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-26 10:26:08,605 main DEBUG Took 0.016363 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-26 10:26:08,605 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-26 10:26:08,606 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-26 10:26:08,607 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-26 10:26:08,609 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:26:08,617 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-26 10:26:08,630 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:26:08,632 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:26:08,633 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:26:08,633 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:26:08,634 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:26:08,634 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:26:08,635 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:26:08,636 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:26:08,636 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:26:08,637 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:26:08,638 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:26:08,638 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:26:08,639 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:26:08,639 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-26 10:26:08,640 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:26:08,641 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:26:08,641 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:26:08,642 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:26:08,642 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:26:08,643 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:26:08,643 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:26:08,644 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:26:08,644 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:26:08,645 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:26:08,645 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:26:08,646 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-26 10:26:08,647 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:26:08,649 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-26 10:26:08,651 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-26 10:26:08,652 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-26 10:26:08,653 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-26 10:26:08,654 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-26 10:26:08,664 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-26 10:26:08,667 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-26 10:26:08,669 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-26 10:26:08,669 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-26 10:26:08,669 main DEBUG createAppenders(={Console}) 2024-11-26 10:26:08,670 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-26 10:26:08,671 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-26 10:26:08,671 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-26 10:26:08,672 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-26 10:26:08,672 main DEBUG OutputStream closed 2024-11-26 10:26:08,672 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-26 10:26:08,673 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-26 10:26:08,673 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-26 10:26:08,752 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-26 10:26:08,754 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-26 10:26:08,755 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-26 10:26:08,755 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-26 10:26:08,756 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-26 10:26:08,756 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-26 10:26:08,757 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-26 10:26:08,757 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-26 10:26:08,757 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-26 10:26:08,758 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-26 10:26:08,758 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-26 10:26:08,758 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-26 10:26:08,759 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-26 10:26:08,759 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-26 10:26:08,759 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-26 10:26:08,760 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-26 10:26:08,760 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-26 10:26:08,761 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-26 10:26:08,763 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-26 10:26:08,763 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-26 10:26:08,764 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-26 10:26:08,764 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-26T10:26:09,047 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175 2024-11-26 10:26:09,050 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-26 10:26:09,051 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
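The block above traces Apache Log4j Core 2.17.2 building the test logging setup from the log4j2.properties packaged in the hbase-logging tests jar: one LoggerConfig per package, a PatternLayout, and a Console appender (HBaseTestAppender targeting SYSTEM_ERR), with the root logger at INFO routed to Console. Purely as an orientation sketch, a minimal log4j2.properties producing a comparable configuration could look like the lines below; the pattern, appender name, root level, and per-package levels are copied from the entries above, while the HBase-specific HBaseTestAppender (and its maxSize option) is replaced by a stock Console appender for illustration.

    # Layout and console appender (stand-in here for the HBaseTestAppender used in the real test config)
    appender.console.type = Console
    appender.console.name = Console
    appender.console.target = SYSTEM_ERR
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

    # Root logger at INFO, routed to the Console appender (matches levelAndRefs="INFO,Console" above)
    rootLogger.level = INFO
    rootLogger.appenderRef.console.ref = Console

    # A few of the per-package levels seen in the LoggerConfig$Builder entries above
    logger.hbase.name = org.apache.hadoop.hbase
    logger.hbase.level = DEBUG
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = WARN
    logger.zk.name = org.apache.zookeeper
    logger.zk.level = ERROR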
2024-11-26T10:26:09,061 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-26T10:26:09,099 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=277, ProcessCount=11, AvailableMemoryMB=7194 2024-11-26T10:26:09,102 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-26T10:26:09,123 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/cluster_c435eb50-1452-06a4-150b-a0c3ff2e87f0, deleteOnExit=true 2024-11-26T10:26:09,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-26T10:26:09,125 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/test.cache.data in system properties and HBase conf 2024-11-26T10:26:09,126 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/hadoop.tmp.dir in system properties and HBase conf 2024-11-26T10:26:09,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/hadoop.log.dir in system properties and HBase conf 2024-11-26T10:26:09,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-26T10:26:09,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-26T10:26:09,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-26T10:26:09,227 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-26T10:26:09,337 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-26T10:26:09,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-26T10:26:09,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-26T10:26:09,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-26T10:26:09,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-26T10:26:09,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-26T10:26:09,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-26T10:26:09,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-26T10:26:09,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-26T10:26:09,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-26T10:26:09,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/nfs.dump.dir in system properties and HBase conf 2024-11-26T10:26:09,348 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/java.io.tmpdir in system properties and HBase conf 2024-11-26T10:26:09,348 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-26T10:26:09,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-26T10:26:09,350 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-26T10:26:09,882 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-26T10:26:10,240 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-26T10:26:10,316 INFO [Time-limited test {}] log.Log(170): Logging initialized @2462ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-26T10:26:10,393 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:26:10,461 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:26:10,480 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:26:10,480 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:26:10,481 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-26T10:26:10,494 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:26:10,496 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:26:10,497 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:26:10,691 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/java.io.tmpdir/jetty-localhost-40693-hadoop-hdfs-3_4_1-tests_jar-_-any-14163282523382696555/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-26T10:26:10,698 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:40693} 2024-11-26T10:26:10,698 INFO [Time-limited test {}] server.Server(415): Started @2846ms 2024-11-26T10:26:10,723 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-26T10:26:11,077 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:26:11,083 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:26:11,084 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:26:11,085 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:26:11,085 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-26T10:26:11,086 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:26:11,086 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:26:11,207 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/java.io.tmpdir/jetty-localhost-39069-hadoop-hdfs-3_4_1-tests_jar-_-any-3314460496095292389/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:26:11,208 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:39069} 2024-11-26T10:26:11,208 INFO [Time-limited test {}] server.Server(415): Started @3355ms 2024-11-26T10:26:11,264 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-26T10:26:11,392 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:26:11,399 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:26:11,400 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:26:11,400 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:26:11,400 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-26T10:26:11,401 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:26:11,402 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:26:11,520 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/java.io.tmpdir/jetty-localhost-35563-hadoop-hdfs-3_4_1-tests_jar-_-any-14540713506329721905/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:26:11,521 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:35563} 2024-11-26T10:26:11,521 INFO [Time-limited test {}] server.Server(415): Started @3669ms 2024-11-26T10:26:11,524 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
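The records from the test start through the jetty startups above correspond to HBaseTestingUtil bringing up a mini cluster with one master, one regionserver, two HDFS DataNodes, and one embedded ZooKeeper (the StartMiniClusterOption string logged at HBaseTestingUtil(805)). As a rough, hypothetical sketch only, not code taken from TestLogRolling, a test that produces this kind of startup might look as follows, assuming the HBase 3 test utility exposes builder methods matching the fields printed in that option string:

    // Hypothetical orientation sketch; class and method names are assumed to match the
    // option fields logged above (numMasters, numRegionServers, numDataNodes, numZkServers).
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterStartupSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)        // one HMaster, as in the logged option string
            .numRegionServers(1)  // one RegionServer
            .numDataNodes(2)      // two HDFS DataNodes (the two jetty "datanode" contexts above)
            .numZkServers(1)      // one MiniZooKeeperCluster node
            .build();
        util.startMiniCluster(option);  // starts DFS, ZooKeeper, master and regionserver
        try {
          // test body would run against the mini cluster here
        } finally {
          util.shutdownMiniCluster();   // tears the whole mini cluster down again
        }
      }
    }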
2024-11-26T10:26:11,701 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/cluster_c435eb50-1452-06a4-150b-a0c3ff2e87f0/data/data4/current/BP-1555577945-172.17.0.2-1732616769980/current, will proceed with Du for space computation calculation, 2024-11-26T10:26:11,701 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/cluster_c435eb50-1452-06a4-150b-a0c3ff2e87f0/data/data2/current/BP-1555577945-172.17.0.2-1732616769980/current, will proceed with Du for space computation calculation, 2024-11-26T10:26:11,701 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/cluster_c435eb50-1452-06a4-150b-a0c3ff2e87f0/data/data1/current/BP-1555577945-172.17.0.2-1732616769980/current, will proceed with Du for space computation calculation, 2024-11-26T10:26:11,701 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/cluster_c435eb50-1452-06a4-150b-a0c3ff2e87f0/data/data3/current/BP-1555577945-172.17.0.2-1732616769980/current, will proceed with Du for space computation calculation, 2024-11-26T10:26:11,760 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-26T10:26:11,761 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-26T10:26:11,834 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb32c4d50032c09fb with lease ID 0xd0acc80139e0cdbd: Processing first storage report for DS-33479667-31e2-4cdb-89d5-f226c25374ff from datanode DatanodeRegistration(127.0.0.1:34351, datanodeUuid=993adc71-1bd2-4677-a666-3f17729b3704, infoPort=33163, infoSecurePort=0, ipcPort=43041, storageInfo=lv=-57;cid=testClusterID;nsid=1149966820;c=1732616769980) 2024-11-26T10:26:11,835 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb32c4d50032c09fb with lease ID 0xd0acc80139e0cdbd: from storage DS-33479667-31e2-4cdb-89d5-f226c25374ff node DatanodeRegistration(127.0.0.1:34351, datanodeUuid=993adc71-1bd2-4677-a666-3f17729b3704, infoPort=33163, infoSecurePort=0, ipcPort=43041, storageInfo=lv=-57;cid=testClusterID;nsid=1149966820;c=1732616769980), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-26T10:26:11,835 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9ecb78f17d554123 with lease ID 0xd0acc80139e0cdbc: Processing first storage report for DS-a4eca5da-f104-446e-9c68-c308706db69e from datanode DatanodeRegistration(127.0.0.1:38569, datanodeUuid=830c34dd-6958-435e-a93e-d9248c2a42de, infoPort=40019, infoSecurePort=0, ipcPort=40217, storageInfo=lv=-57;cid=testClusterID;nsid=1149966820;c=1732616769980) 2024-11-26T10:26:11,836 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ecb78f17d554123 with lease ID 0xd0acc80139e0cdbc: from storage DS-a4eca5da-f104-446e-9c68-c308706db69e node DatanodeRegistration(127.0.0.1:38569, datanodeUuid=830c34dd-6958-435e-a93e-d9248c2a42de, infoPort=40019, infoSecurePort=0, ipcPort=40217, storageInfo=lv=-57;cid=testClusterID;nsid=1149966820;c=1732616769980), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-26T10:26:11,836 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb32c4d50032c09fb with lease ID 0xd0acc80139e0cdbd: Processing first storage report for DS-675323b9-b05c-4416-9085-66105b1a565e from datanode DatanodeRegistration(127.0.0.1:34351, datanodeUuid=993adc71-1bd2-4677-a666-3f17729b3704, infoPort=33163, infoSecurePort=0, ipcPort=43041, storageInfo=lv=-57;cid=testClusterID;nsid=1149966820;c=1732616769980) 2024-11-26T10:26:11,836 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb32c4d50032c09fb with lease ID 0xd0acc80139e0cdbd: from storage DS-675323b9-b05c-4416-9085-66105b1a565e node DatanodeRegistration(127.0.0.1:34351, datanodeUuid=993adc71-1bd2-4677-a666-3f17729b3704, infoPort=33163, infoSecurePort=0, ipcPort=43041, storageInfo=lv=-57;cid=testClusterID;nsid=1149966820;c=1732616769980), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:26:11,837 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9ecb78f17d554123 with lease ID 0xd0acc80139e0cdbc: Processing first storage report for DS-f79680eb-df32-4163-8d19-e18f77ef5798 from datanode DatanodeRegistration(127.0.0.1:38569, datanodeUuid=830c34dd-6958-435e-a93e-d9248c2a42de, infoPort=40019, infoSecurePort=0, ipcPort=40217, storageInfo=lv=-57;cid=testClusterID;nsid=1149966820;c=1732616769980) 2024-11-26T10:26:11,837 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x9ecb78f17d554123 with lease ID 0xd0acc80139e0cdbc: from storage DS-f79680eb-df32-4163-8d19-e18f77ef5798 node DatanodeRegistration(127.0.0.1:38569, datanodeUuid=830c34dd-6958-435e-a93e-d9248c2a42de, infoPort=40019, infoSecurePort=0, ipcPort=40217, storageInfo=lv=-57;cid=testClusterID;nsid=1149966820;c=1732616769980), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-26T10:26:11,929 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175 2024-11-26T10:26:12,023 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/cluster_c435eb50-1452-06a4-150b-a0c3ff2e87f0/zookeeper_0, clientPort=54085, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/cluster_c435eb50-1452-06a4-150b-a0c3ff2e87f0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/cluster_c435eb50-1452-06a4-150b-a0c3ff2e87f0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-26T10:26:12,033 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54085 2024-11-26T10:26:12,046 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:26:12,049 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:26:12,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741825_1001 (size=7) 2024-11-26T10:26:12,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741825_1001 (size=7) 2024-11-26T10:26:12,701 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db with version=8 2024-11-26T10:26:12,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/hbase-staging 2024-11-26T10:26:12,797 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-26T10:26:13,045 INFO [Time-limited test {}] client.ConnectionUtils(128): master/94eedbb855cf:0 server-side Connection retries=45 2024-11-26T10:26:13,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:26:13,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-26T10:26:13,061 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-26T10:26:13,061 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:26:13,061 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-26T10:26:13,197 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-26T10:26:13,257 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-26T10:26:13,265 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-26T10:26:13,269 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-26T10:26:13,296 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 16489 (auto-detected) 2024-11-26T10:26:13,297 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-26T10:26:13,316 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42131 2024-11-26T10:26:13,337 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42131 connecting to ZooKeeper ensemble=127.0.0.1:54085 2024-11-26T10:26:13,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:421310x0, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-26T10:26:13,383 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42131-0x10153cfc42e0000 connected 2024-11-26T10:26:13,412 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:26:13,414 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:26:13,424 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:26:13,428 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db, hbase.cluster.distributed=false 2024-11-26T10:26:13,450 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-26T10:26:13,454 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42131 2024-11-26T10:26:13,455 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42131 2024-11-26T10:26:13,455 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42131 2024-11-26T10:26:13,456 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42131 2024-11-26T10:26:13,459 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42131 2024-11-26T10:26:13,571 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/94eedbb855cf:0 server-side Connection retries=45 2024-11-26T10:26:13,573 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:26:13,573 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-26T10:26:13,574 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-26T10:26:13,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:26:13,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-26T10:26:13,577 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-26T10:26:13,579 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-26T10:26:13,580 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40793 2024-11-26T10:26:13,582 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40793 connecting to ZooKeeper ensemble=127.0.0.1:54085 2024-11-26T10:26:13,583 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:26:13,586 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:26:13,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:407930x0, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-26T10:26:13,594 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:407930x0, quorum=127.0.0.1:54085, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:26:13,594 DEBUG 
[zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40793-0x10153cfc42e0001 connected 2024-11-26T10:26:13,598 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-26T10:26:13,606 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-26T10:26:13,608 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-26T10:26:13,613 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-26T10:26:13,616 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40793 2024-11-26T10:26:13,616 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40793 2024-11-26T10:26:13,618 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40793 2024-11-26T10:26:13,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40793 2024-11-26T10:26:13,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40793 2024-11-26T10:26:13,635 DEBUG [M:0;94eedbb855cf:42131 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;94eedbb855cf:42131 2024-11-26T10:26:13,636 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/94eedbb855cf,42131,1732616772849 2024-11-26T10:26:13,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:26:13,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:26:13,646 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/94eedbb855cf,42131,1732616772849 2024-11-26T10:26:13,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-26T10:26:13,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:26:13,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-26T10:26:13,668 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-26T10:26:13,669 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/94eedbb855cf,42131,1732616772849 from backup master directory 2024-11-26T10:26:13,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/94eedbb855cf,42131,1732616772849 2024-11-26T10:26:13,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:26:13,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:26:13,674 WARN [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-26T10:26:13,674 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=94eedbb855cf,42131,1732616772849 2024-11-26T10:26:13,677 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-26T10:26:13,679 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-26T10:26:13,737 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/hbase.id] with ID: 3528e78f-e046-48e8-9fcd-8372e94e2855 2024-11-26T10:26:13,738 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/.tmp/hbase.id 2024-11-26T10:26:13,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741826_1002 (size=42) 2024-11-26T10:26:13,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741826_1002 (size=42) 2024-11-26T10:26:13,751 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/.tmp/hbase.id]:[hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/hbase.id] 2024-11-26T10:26:13,793 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:26:13,799 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] 
util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-26T10:26:13,818 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-26T10:26:13,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:26:13,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:26:13,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741827_1003 (size=196) 2024-11-26T10:26:13,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741827_1003 (size=196) 2024-11-26T10:26:13,856 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:26:13,858 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-26T10:26:13,864 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:26:13,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741828_1004 (size=1189) 2024-11-26T10:26:13,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741828_1004 (size=1189) 2024-11-26T10:26:13,923 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY 
=> ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store 2024-11-26T10:26:13,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741829_1005 (size=34) 2024-11-26T10:26:13,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741829_1005 (size=34) 2024-11-26T10:26:13,951 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-26T10:26:13,955 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:26:13,956 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-26T10:26:13,957 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:26:13,957 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:26:13,959 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-26T10:26:13,959 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:26:13,959 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-26T10:26:13,961 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732616773956Disabling compacts and flushes for region at 1732616773956Disabling writes for close at 1732616773959 (+3 ms)Writing region close event to WAL at 1732616773959Closed at 1732616773959 2024-11-26T10:26:13,963 WARN [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/.initializing 2024-11-26T10:26:13,963 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/WALs/94eedbb855cf,42131,1732616772849 2024-11-26T10:26:13,990 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C42131%2C1732616772849, suffix=, logDir=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/WALs/94eedbb855cf,42131,1732616772849, archiveDir=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/oldWALs, maxLogs=10 2024-11-26T10:26:14,002 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C42131%2C1732616772849.1732616773996 2024-11-26T10:26:14,024 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/WALs/94eedbb855cf,42131,1732616772849/94eedbb855cf%2C42131%2C1732616772849.1732616773996 2024-11-26T10:26:14,035 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33163:33163),(127.0.0.1/127.0.0.1:40019:40019)] 2024-11-26T10:26:14,037 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:26:14,038 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:26:14,042 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:26:14,043 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:26:14,081 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:26:14,108 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-26T10:26:14,112 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:26:14,115 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:26:14,117 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:26:14,121 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-26T10:26:14,121 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:26:14,122 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:26:14,122 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:26:14,126 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-26T10:26:14,126 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:26:14,127 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:26:14,127 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:26:14,130 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-26T10:26:14,130 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:26:14,131 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:26:14,131 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:26:14,135 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:26:14,136 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:26:14,141 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:26:14,142 DEBUG [master/94eedbb855cf:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:26:14,145 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-26T10:26:14,150 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:26:14,155 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:26:14,156 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=718397, jitterRate=-0.08651123940944672}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-26T10:26:14,165 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732616774056Initializing all the Stores at 1732616774059 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616774059Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616774060 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616774060Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616774061 (+1 ms)Cleaning up temporary data from old regions at 1732616774142 (+81 ms)Region opened successfully at 1732616774164 (+22 ms) 2024-11-26T10:26:14,166 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-26T10:26:14,206 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7747da44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=94eedbb855cf/172.17.0.2:0 2024-11-26T10:26:14,243 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-26T10:26:14,258 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-26T10:26:14,258 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-26T10:26:14,262 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-26T10:26:14,264 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-26T10:26:14,270 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-26T10:26:14,270 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-26T10:26:14,299 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-26T10:26:14,308 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-26T10:26:14,311 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-26T10:26:14,314 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-26T10:26:14,316 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-26T10:26:14,317 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-26T10:26:14,320 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-26T10:26:14,324 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-26T10:26:14,327 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-26T10:26:14,329 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-26T10:26:14,330 
DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-26T10:26:14,349 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-26T10:26:14,350 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-26T10:26:14,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-26T10:26:14,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-26T10:26:14,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:26:14,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:26:14,360 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=94eedbb855cf,42131,1732616772849, sessionid=0x10153cfc42e0000, setting cluster-up flag (Was=false) 2024-11-26T10:26:14,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:26:14,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:26:14,384 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-26T10:26:14,386 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=94eedbb855cf,42131,1732616772849 2024-11-26T10:26:14,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:26:14,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:26:14,397 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-26T10:26:14,398 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=94eedbb855cf,42131,1732616772849 2024-11-26T10:26:14,405 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-26T10:26:14,423 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(746): ClusterId : 3528e78f-e046-48e8-9fcd-8372e94e2855 2024-11-26T10:26:14,427 DEBUG [RS:0;94eedbb855cf:40793 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-26T10:26:14,432 DEBUG [RS:0;94eedbb855cf:40793 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-26T10:26:14,432 DEBUG [RS:0;94eedbb855cf:40793 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-26T10:26:14,435 DEBUG [RS:0;94eedbb855cf:40793 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-26T10:26:14,435 DEBUG [RS:0;94eedbb855cf:40793 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f549e06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=94eedbb855cf/172.17.0.2:0 2024-11-26T10:26:14,450 DEBUG [RS:0;94eedbb855cf:40793 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;94eedbb855cf:40793 2024-11-26T10:26:14,453 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-26T10:26:14,453 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-26T10:26:14,453 DEBUG [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-26T10:26:14,459 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(2659): reportForDuty to master=94eedbb855cf,42131,1732616772849 with port=40793, startcode=1732616773529 2024-11-26T10:26:14,473 DEBUG [RS:0;94eedbb855cf:40793 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-26T10:26:14,483 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-26T10:26:14,494 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-26T10:26:14,502 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-26T10:26:14,507 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 94eedbb855cf,42131,1732616772849 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-26T10:26:14,515 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:26:14,515 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:26:14,515 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:26:14,515 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:26:14,515 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/94eedbb855cf:0, corePoolSize=10, maxPoolSize=10 2024-11-26T10:26:14,515 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:26:14,516 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/94eedbb855cf:0, corePoolSize=2, maxPoolSize=2 2024-11-26T10:26:14,516 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:26:14,522 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:26:14,522 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-26T10:26:14,525 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732616804525 2024-11-26T10:26:14,527 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-26T10:26:14,529 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-26T10:26:14,530 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:26:14,530 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-26T10:26:14,533 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-26T10:26:14,534 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-26T10:26:14,534 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-26T10:26:14,534 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-26T10:26:14,536 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-26T10:26:14,541 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-26T10:26:14,542 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-26T10:26:14,542 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-26T10:26:14,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741831_1007 (size=1321) 2024-11-26T10:26:14,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741831_1007 (size=1321) 2024-11-26T10:26:14,551 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-26T10:26:14,551 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db 2024-11-26T10:26:14,552 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-26T10:26:14,554 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-26T10:26:14,554 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49023, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-26T10:26:14,556 DEBUG 
[master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732616774555,5,FailOnTimeoutGroup] 2024-11-26T10:26:14,564 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732616774556,5,FailOnTimeoutGroup] 2024-11-26T10:26:14,561 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42131 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-26T10:26:14,564 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-26T10:26:14,565 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-26T10:26:14,566 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-26T10:26:14,567 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
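[Note on the HMaster line above about reopening regions with very high storeFileRefCount: the message says the feature is enabled by providing a threshold value > 0 for hbase.regions.recovery.store.file.ref.count. The sketch below is a hypothetical illustration of supplying such a threshold via the client Configuration; the threshold 256 is an assumed example, not a value from this log, and in practice the property would be set in the master's hbase-site.xml.]

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountRecoverySketch {
    public static void main(String[] args) {
        // Load the default HBase configuration.
        Configuration conf = HBaseConfiguration.create();

        // A threshold > 0 enables reopening regions whose store file reference
        // count exceeds it, per the HMaster log message. 256 is an example value.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);

        // Print the effective value to confirm what was set.
        System.out.println("hbase.regions.recovery.store.file.ref.count = "
                + conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
    }
}
```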
2024-11-26T10:26:14,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741832_1008 (size=32) 2024-11-26T10:26:14,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741832_1008 (size=32) 2024-11-26T10:26:14,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:26:14,585 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-26T10:26:14,589 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-26T10:26:14,589 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:26:14,590 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:26:14,590 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-26T10:26:14,593 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-26T10:26:14,593 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:26:14,594 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:26:14,595 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-26T10:26:14,597 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-26T10:26:14,597 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:26:14,598 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:26:14,598 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-26T10:26:14,599 DEBUG [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-26T10:26:14,600 WARN [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-11-26T10:26:14,600 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-26T10:26:14,601 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:26:14,602 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:26:14,602 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-26T10:26:14,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740 2024-11-26T10:26:14,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740 2024-11-26T10:26:14,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-26T10:26:14,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-26T10:26:14,609 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-26T10:26:14,612 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-26T10:26:14,615 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:26:14,616 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=744828, jitterRate=-0.0529026985168457}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:26:14,618 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732616774574Initializing all the Stores at 1732616774584 (+10 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616774585 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616774585Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616774585Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616774585Cleaning up temporary data from old regions at 1732616774608 (+23 ms)Region opened successfully at 1732616774618 (+10 ms) 2024-11-26T10:26:14,619 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-26T10:26:14,619 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-26T10:26:14,619 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-26T10:26:14,619 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-26T10:26:14,619 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-26T10:26:14,620 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-26T10:26:14,620 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732616774618Disabling compacts and flushes for region at 1732616774619 (+1 ms)Disabling writes for close at 
1732616774619Writing region close event to WAL at 1732616774620 (+1 ms)Closed at 1732616774620 2024-11-26T10:26:14,623 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:26:14,623 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-26T10:26:14,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-26T10:26:14,638 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-26T10:26:14,640 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-26T10:26:14,701 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(2659): reportForDuty to master=94eedbb855cf,42131,1732616772849 with port=40793, startcode=1732616773529 2024-11-26T10:26:14,703 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42131 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 94eedbb855cf,40793,1732616773529 2024-11-26T10:26:14,706 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42131 {}] master.ServerManager(517): Registering regionserver=94eedbb855cf,40793,1732616773529 2024-11-26T10:26:14,714 DEBUG [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db 2024-11-26T10:26:14,714 DEBUG [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33735 2024-11-26T10:26:14,714 DEBUG [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-26T10:26:14,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-26T10:26:14,724 DEBUG [RS:0;94eedbb855cf:40793 {}] zookeeper.ZKUtil(111): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/94eedbb855cf,40793,1732616773529 2024-11-26T10:26:14,724 WARN [RS:0;94eedbb855cf:40793 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-26T10:26:14,724 INFO [RS:0;94eedbb855cf:40793 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:26:14,725 DEBUG [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529 2024-11-26T10:26:14,726 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [94eedbb855cf,40793,1732616773529] 2024-11-26T10:26:14,750 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-26T10:26:14,761 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-26T10:26:14,766 INFO [RS:0;94eedbb855cf:40793 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-26T10:26:14,766 INFO [RS:0;94eedbb855cf:40793 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:26:14,767 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-26T10:26:14,772 INFO [RS:0;94eedbb855cf:40793 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-26T10:26:14,773 INFO [RS:0;94eedbb855cf:40793 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-26T10:26:14,773 DEBUG [RS:0;94eedbb855cf:40793 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:26:14,774 DEBUG [RS:0;94eedbb855cf:40793 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:26:14,774 DEBUG [RS:0;94eedbb855cf:40793 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:26:14,774 DEBUG [RS:0;94eedbb855cf:40793 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:26:14,774 DEBUG [RS:0;94eedbb855cf:40793 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:26:14,774 DEBUG [RS:0;94eedbb855cf:40793 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/94eedbb855cf:0, corePoolSize=2, maxPoolSize=2 2024-11-26T10:26:14,774 DEBUG [RS:0;94eedbb855cf:40793 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:26:14,774 DEBUG [RS:0;94eedbb855cf:40793 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:26:14,775 DEBUG [RS:0;94eedbb855cf:40793 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/94eedbb855cf:0, corePoolSize=1, 
maxPoolSize=1 2024-11-26T10:26:14,775 DEBUG [RS:0;94eedbb855cf:40793 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:26:14,775 DEBUG [RS:0;94eedbb855cf:40793 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:26:14,775 DEBUG [RS:0;94eedbb855cf:40793 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:26:14,775 DEBUG [RS:0;94eedbb855cf:40793 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/94eedbb855cf:0, corePoolSize=3, maxPoolSize=3 2024-11-26T10:26:14,775 DEBUG [RS:0;94eedbb855cf:40793 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0, corePoolSize=3, maxPoolSize=3 2024-11-26T10:26:14,776 INFO [RS:0;94eedbb855cf:40793 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-26T10:26:14,776 INFO [RS:0;94eedbb855cf:40793 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-26T10:26:14,776 INFO [RS:0;94eedbb855cf:40793 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:26:14,777 INFO [RS:0;94eedbb855cf:40793 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-26T10:26:14,777 INFO [RS:0;94eedbb855cf:40793 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-26T10:26:14,777 INFO [RS:0;94eedbb855cf:40793 {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,40793,1732616773529-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-26T10:26:14,791 WARN [94eedbb855cf:42131 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-26T10:26:14,796 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-26T10:26:14,798 INFO [RS:0;94eedbb855cf:40793 {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,40793,1732616773529-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:26:14,798 INFO [RS:0;94eedbb855cf:40793 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:26:14,799 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.Replication(171): 94eedbb855cf,40793,1732616773529 started 2024-11-26T10:26:14,817 INFO [RS:0;94eedbb855cf:40793 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-26T10:26:14,817 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(1482): Serving as 94eedbb855cf,40793,1732616773529, RpcServer on 94eedbb855cf/172.17.0.2:40793, sessionid=0x10153cfc42e0001 2024-11-26T10:26:14,818 DEBUG [RS:0;94eedbb855cf:40793 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-26T10:26:14,818 DEBUG [RS:0;94eedbb855cf:40793 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 94eedbb855cf,40793,1732616773529 2024-11-26T10:26:14,818 DEBUG [RS:0;94eedbb855cf:40793 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '94eedbb855cf,40793,1732616773529' 2024-11-26T10:26:14,818 DEBUG [RS:0;94eedbb855cf:40793 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-26T10:26:14,820 DEBUG [RS:0;94eedbb855cf:40793 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-26T10:26:14,820 DEBUG [RS:0;94eedbb855cf:40793 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-26T10:26:14,820 DEBUG [RS:0;94eedbb855cf:40793 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-26T10:26:14,821 DEBUG [RS:0;94eedbb855cf:40793 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 94eedbb855cf,40793,1732616773529 2024-11-26T10:26:14,821 DEBUG [RS:0;94eedbb855cf:40793 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '94eedbb855cf,40793,1732616773529' 2024-11-26T10:26:14,821 DEBUG [RS:0;94eedbb855cf:40793 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-26T10:26:14,821 DEBUG [RS:0;94eedbb855cf:40793 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-26T10:26:14,822 DEBUG [RS:0;94eedbb855cf:40793 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-26T10:26:14,822 INFO [RS:0;94eedbb855cf:40793 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-26T10:26:14,822 INFO [RS:0;94eedbb855cf:40793 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
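Both quota managers above report that quota support is disabled, which is the default behaviour: RPC and space quotas only come into play when the hbase.quota.enabled switch is turned on. A minimal sketch of that single setting, assuming a plain client/test Configuration (the class name below is hypothetical, not part of this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitchSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // hbase.quota.enabled defaults to false, which is why both
            // RegionServerRpcQuotaManager and RegionServerSpaceQuotaManager
            // log "Quota support disabled" above.
            conf.setBoolean("hbase.quota.enabled", true);
            System.out.println("quotas enabled: " + conf.getBoolean("hbase.quota.enabled", false));
        }
    }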
2024-11-26T10:26:14,930 INFO [RS:0;94eedbb855cf:40793 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C40793%2C1732616773529, suffix=, logDir=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529, archiveDir=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/oldWALs, maxLogs=32 2024-11-26T10:26:14,933 INFO [RS:0;94eedbb855cf:40793 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C40793%2C1732616773529.1732616774933 2024-11-26T10:26:14,942 INFO [RS:0;94eedbb855cf:40793 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616774933 2024-11-26T10:26:14,944 DEBUG [RS:0;94eedbb855cf:40793 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33163:33163),(127.0.0.1/127.0.0.1:40019:40019)] 2024-11-26T10:26:15,044 DEBUG [94eedbb855cf:42131 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-26T10:26:15,058 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=94eedbb855cf,40793,1732616773529 2024-11-26T10:26:15,066 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 94eedbb855cf,40793,1732616773529, state=OPENING 2024-11-26T10:26:15,070 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-26T10:26:15,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:26:15,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:26:15,073 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:26:15,073 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:26:15,074 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-26T10:26:15,076 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=94eedbb855cf,40793,1732616773529}] 2024-11-26T10:26:15,251 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-26T10:26:15,255 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59087, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-26T10:26:15,266 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-26T10:26:15,267 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:26:15,270 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C40793%2C1732616773529.meta, suffix=.meta, logDir=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529, archiveDir=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/oldWALs, maxLogs=32 2024-11-26T10:26:15,272 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C40793%2C1732616773529.meta.1732616775272.meta 2024-11-26T10:26:15,281 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.meta.1732616775272.meta 2024-11-26T10:26:15,286 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33163:33163),(127.0.0.1/127.0.0.1:40019:40019)] 2024-11-26T10:26:15,289 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:26:15,291 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-26T10:26:15,293 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-26T10:26:15,298 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
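The two WALs just created (the region server WAL and the hbase:meta WAL) both report blocksize=256 MB, rollsize=128 MB and maxLogs=32. The roll size is simply the block size multiplied by the default log-roll multiplier of 0.5, which is why a 256 MB block size yields a 128 MB roll threshold. A minimal sketch of the keys involved, assuming the stock hbase.regionserver.* settings (the fallback values below just echo what this log prints; the class name is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSizeSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // WAL block size; 256 MB in the "WAL configuration" lines above.
            long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            // Roll multiplier, 0.5 by default, so rollsize = 256 MB * 0.5 = 128 MB.
            float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            // Maximum number of WAL files kept around; matches maxLogs=32 above.
            int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
            System.out.printf("rollsize=%d bytes, maxLogs=%d%n",
                    (long) (blockSize * multiplier), maxLogs);
        }
    }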
2024-11-26T10:26:15,302 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-26T10:26:15,303 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:26:15,303 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-26T10:26:15,303 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-26T10:26:15,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-26T10:26:15,308 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-26T10:26:15,308 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:26:15,309 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:26:15,309 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-26T10:26:15,311 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-26T10:26:15,311 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:26:15,311 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:26:15,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-26T10:26:15,313 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-26T10:26:15,313 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:26:15,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:26:15,314 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-26T10:26:15,315 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-26T10:26:15,315 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:26:15,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
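Every store opened above (info, ns, rep_barrier, table) prints the same CompactionConfiguration summary, and the numbers are the stock defaults: candidate file counts in [3, 10), selection ratio 1.2, off-peak ratio 5.0, and a major compaction period of 604800000 ms (seven days) with 0.5 jitter. A minimal sketch of where those values live, assuming the usual hbase.hstore.compaction.* keys (hypothetical class name; the fallbacks mirror the logged defaults):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionDefaultsSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // minFilesToCompact:3 and maxFilesToCompact:10 in the lines above.
            int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
            int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
            // ratio 1.200000 and off-peak ratio 5.000000 in the lines above.
            float ratio = conf.getFloat("hbase.hstore.compaction.ratio", 1.2f);
            float offPeakRatio = conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
            // major period 604800000 ms, i.e. one week, with 0.5 jitter.
            long majorPeriodMs = conf.getLong("hbase.hregion.majorcompaction", 604800000L);
            System.out.printf("files [%d,%d), ratio %.1f / %.1f off-peak, major every %d ms%n",
                    minFiles, maxFiles, ratio, offPeakRatio, majorPeriodMs);
        }
    }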
2024-11-26T10:26:15,316 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-26T10:26:15,317 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740 2024-11-26T10:26:15,319 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740 2024-11-26T10:26:15,321 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-26T10:26:15,321 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-26T10:26:15,322 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-26T10:26:15,324 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-26T10:26:15,326 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=742545, jitterRate=-0.055806174874305725}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:26:15,326 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-26T10:26:15,328 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732616775304Writing region info on filesystem at 1732616775304Initializing all the Stores at 1732616775306 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616775306Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616775307 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616775307Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616775307Cleaning up temporary data from old regions at 1732616775321 (+14 ms)Running coprocessor post-open hooks at 1732616775326 (+5 ms)Region opened successfully at 1732616775328 (+2 ms) 2024-11-26T10:26:15,335 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732616775242 2024-11-26T10:26:15,346 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-26T10:26:15,346 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-26T10:26:15,347 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=94eedbb855cf,40793,1732616773529 2024-11-26T10:26:15,349 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 94eedbb855cf,40793,1732616773529, state=OPEN 2024-11-26T10:26:15,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-26T10:26:15,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-26T10:26:15,362 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:26:15,362 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=94eedbb855cf,40793,1732616773529 2024-11-26T10:26:15,362 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:26:15,367 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-26T10:26:15,367 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=94eedbb855cf,40793,1732616773529 in 286 msec 2024-11-26T10:26:15,373 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-26T10:26:15,373 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 739 msec 2024-11-26T10:26:15,375 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:26:15,375 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-26T10:26:15,394 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-26T10:26:15,395 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=94eedbb855cf,40793,1732616773529, seqNum=-1] 2024-11-26T10:26:15,416 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:26:15,418 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36755, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:26:15,437 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0030 sec 2024-11-26T10:26:15,438 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732616775437, completionTime=-1 2024-11-26T10:26:15,440 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-26T10:26:15,440 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-26T10:26:15,468 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-26T10:26:15,468 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732616835468 2024-11-26T10:26:15,468 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732616895468 2024-11-26T10:26:15,468 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 28 msec 2024-11-26T10:26:15,471 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,42131,1732616772849-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:26:15,471 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,42131,1732616772849-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:26:15,471 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,42131,1732616772849-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:26:15,473 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-94eedbb855cf:42131, period=300000, unit=MILLISECONDS is enabled. 
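InitMetaProcedure has just finished (pid=1) after creating the two built-in namespaces, 'default' and 'hbase'. A minimal sketch, assuming a reachable cluster and the public Admin API, of listing those namespaces from a client (hypothetical class name; not code from this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespacesSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // On a freshly initialized cluster this prints exactly the two
                // namespaces created by the InitMetaProcedure step logged above.
                for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                    System.out.println(ns.getName());
                }
            }
        }
    }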
2024-11-26T10:26:15,473 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-26T10:26:15,474 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-26T10:26:15,481 DEBUG [master/94eedbb855cf:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-26T10:26:15,506 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.831sec 2024-11-26T10:26:15,508 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-26T10:26:15,509 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-26T10:26:15,510 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-26T10:26:15,510 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-26T10:26:15,510 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-26T10:26:15,511 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,42131,1732616772849-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-26T10:26:15,512 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,42131,1732616772849-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-26T10:26:15,519 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-26T10:26:15,520 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-26T10:26:15,521 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,42131,1732616772849-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-26T10:26:15,533 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b598c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:26:15,535 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-26T10:26:15,535 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-26T10:26:15,539 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 94eedbb855cf,42131,-1 for getting cluster id 2024-11-26T10:26:15,541 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-26T10:26:15,549 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3528e78f-e046-48e8-9fcd-8372e94e2855' 2024-11-26T10:26:15,551 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-26T10:26:15,552 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3528e78f-e046-48e8-9fcd-8372e94e2855" 2024-11-26T10:26:15,552 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@540c4ea5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:26:15,552 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [94eedbb855cf,42131,-1] 2024-11-26T10:26:15,555 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-26T10:26:15,556 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:26:15,557 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48364, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-26T10:26:15,560 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36e15a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:26:15,561 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-26T10:26:15,567 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=94eedbb855cf,40793,1732616773529, seqNum=-1] 2024-11-26T10:26:15,568 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:26:15,570 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39992, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:26:15,591 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=94eedbb855cf,42131,1732616772849 2024-11-26T10:26:15,591 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:26:15,599 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-26T10:26:15,621 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-26T10:26:15,627 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 94eedbb855cf,42131,1732616772849 2024-11-26T10:26:15,631 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@63a7af04 2024-11-26T10:26:15,632 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-26T10:26:15,635 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48380, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-26T10:26:15,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42131 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-26T10:26:15,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42131 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
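The two TableDescriptorChecker warnings are expected in this test: hbase.hregion.max.filesize is set to 786432 bytes (768 KB) and hbase.hregion.memstore.flush.size to 8192 bytes (8 KB), presumably so that flushes, WAL rolls and splits happen within seconds rather than after gigabytes of writes. A minimal sketch of setting such values on a Configuration, using the two keys named in the warnings (production defaults are orders of magnitude larger; the class name is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TinyRegionSettingsSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // 768 KB maximum region size; the checker warns this "might cause
            // over splitting into unmanageable number of regions".
            conf.setLong("hbase.hregion.max.filesize", 786432L);
            // 8 KB memstore flush size; the checker warns about very frequent
            // flushing, which is exactly what a log-rolling test wants.
            conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
            System.out.println("configured for aggressive flushing and rolling");
        }
    }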
2024-11-26T10:26:15,642 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42131 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:26:15,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42131 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-26T10:26:15,657 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-26T10:26:15,659 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42131 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-26T10:26:15,660 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:26:15,662 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-26T10:26:15,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42131 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-26T10:26:15,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741835_1011 (size=389) 2024-11-26T10:26:15,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741835_1011 (size=389) 2024-11-26T10:26:16,145 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 584534eef480da6884f3b883bc63829e, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db 2024-11-26T10:26:16,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741836_1012 (size=72) 2024-11-26T10:26:16,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741836_1012 (size=72) 2024-11-26T10:26:16,159 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:26:16,160 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 584534eef480da6884f3b883bc63829e, disabling compactions & flushes 2024-11-26T10:26:16,160 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e. 2024-11-26T10:26:16,160 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e. 2024-11-26T10:26:16,160 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e. after waiting 0 ms 2024-11-26T10:26:16,160 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e. 2024-11-26T10:26:16,160 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e. 2024-11-26T10:26:16,160 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 584534eef480da6884f3b883bc63829e: Waiting for close lock at 1732616776159Disabling compacts and flushes for region at 1732616776159Disabling writes for close at 1732616776160 (+1 ms)Writing region close event to WAL at 1732616776160Closed at 1732616776160 2024-11-26T10:26:16,163 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-26T10:26:16,168 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732616776163"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732616776163"}]},"ts":"1732616776163"} 2024-11-26T10:26:16,174 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-26T10:26:16,176 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-26T10:26:16,179 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732616776176"}]},"ts":"1732616776176"} 2024-11-26T10:26:16,184 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-26T10:26:16,186 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=584534eef480da6884f3b883bc63829e, ASSIGN}] 2024-11-26T10:26:16,188 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=584534eef480da6884f3b883bc63829e, ASSIGN 2024-11-26T10:26:16,190 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=584534eef480da6884f3b883bc63829e, ASSIGN; state=OFFLINE, location=94eedbb855cf,40793,1732616773529; forceNewPlan=false, retain=false 2024-11-26T10:26:16,342 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=584534eef480da6884f3b883bc63829e, regionState=OPENING, regionLocation=94eedbb855cf,40793,1732616773529 2024-11-26T10:26:16,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=584534eef480da6884f3b883bc63829e, ASSIGN because future has completed 2024-11-26T10:26:16,347 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 584534eef480da6884f3b883bc63829e, server=94eedbb855cf,40793,1732616773529}] 2024-11-26T10:26:16,510 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e. 
2024-11-26T10:26:16,510 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 584534eef480da6884f3b883bc63829e, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:26:16,511 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 584534eef480da6884f3b883bc63829e 2024-11-26T10:26:16,511 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:26:16,511 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 584534eef480da6884f3b883bc63829e 2024-11-26T10:26:16,512 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 584534eef480da6884f3b883bc63829e 2024-11-26T10:26:16,515 INFO [StoreOpener-584534eef480da6884f3b883bc63829e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 584534eef480da6884f3b883bc63829e 2024-11-26T10:26:16,518 INFO [StoreOpener-584534eef480da6884f3b883bc63829e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 584534eef480da6884f3b883bc63829e columnFamilyName info 2024-11-26T10:26:16,518 DEBUG [StoreOpener-584534eef480da6884f3b883bc63829e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:26:16,519 INFO [StoreOpener-584534eef480da6884f3b883bc63829e-1 {}] regionserver.HStore(327): Store=584534eef480da6884f3b883bc63829e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:26:16,519 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 584534eef480da6884f3b883bc63829e 2024-11-26T10:26:16,521 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e 2024-11-26T10:26:16,522 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e 2024-11-26T10:26:16,523 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 584534eef480da6884f3b883bc63829e 2024-11-26T10:26:16,523 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 584534eef480da6884f3b883bc63829e 2024-11-26T10:26:16,528 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 584534eef480da6884f3b883bc63829e 2024-11-26T10:26:16,532 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:26:16,533 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 584534eef480da6884f3b883bc63829e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=757068, jitterRate=-0.037339404225349426}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-26T10:26:16,533 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 584534eef480da6884f3b883bc63829e 2024-11-26T10:26:16,534 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 584534eef480da6884f3b883bc63829e: Running coprocessor pre-open hook at 1732616776512Writing region info on filesystem at 1732616776512Initializing all the Stores at 1732616776514 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616776514Cleaning up temporary data from old regions at 1732616776523 (+9 ms)Running coprocessor post-open hooks at 1732616776533 (+10 ms)Region opened successfully at 1732616776534 (+1 ms) 2024-11-26T10:26:16,536 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e., pid=6, masterSystemTime=1732616776502 2024-11-26T10:26:16,541 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e. 2024-11-26T10:26:16,542 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e. 2024-11-26T10:26:16,543 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=584534eef480da6884f3b883bc63829e, regionState=OPEN, openSeqNum=2, regionLocation=94eedbb855cf,40793,1732616773529 2024-11-26T10:26:16,548 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 584534eef480da6884f3b883bc63829e, server=94eedbb855cf,40793,1732616773529 because future has completed 2024-11-26T10:26:16,557 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-26T10:26:16,557 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 584534eef480da6884f3b883bc63829e, server=94eedbb855cf,40793,1732616773529 in 204 msec 2024-11-26T10:26:16,561 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-26T10:26:16,562 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=584534eef480da6884f3b883bc63829e, ASSIGN in 371 msec 2024-11-26T10:26:16,563 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-26T10:26:16,563 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732616776563"}]},"ts":"1732616776563"} 2024-11-26T10:26:16,567 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-26T10:26:16,568 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-26T10:26:16,571 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 923 msec 2024-11-26T10:26:20,806 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-26T10:26:20,851 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-26T10:26:20,853 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-26T10:26:23,254 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-26T10:26:23,255 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-26T10:26:23,257 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-26T10:26:23,257 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-26T10:26:23,258 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-26T10:26:23,258 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-26T10:26:23,258 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-26T10:26:23,258 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-26T10:26:25,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42131 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-26T10:26:25,697 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-26T10:26:25,699 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-26T10:26:25,705 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-26T10:26:25,706 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e. 
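The CreateTableProcedure (pid=4) has now completed and the client has located the single region of TestLogRolling-testSlowSyncLogRolling. The create request logged earlier carried one column family, 'info', limited to a single version. A minimal sketch, assuming a running cluster reachable through the default configuration, of issuing an equivalent create through the public Admin API (hypothetical class name; connection handling simplified, not the test's own code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                TableDescriptor desc = TableDescriptorBuilder
                        .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
                        // Mirrors the descriptor in the create request: family 'info', VERSIONS => '1'.
                        .setColumnFamily(ColumnFamilyDescriptorBuilder
                                .newBuilder(Bytes.toBytes("info"))
                                .setMaxVersions(1)
                                .build())
                        .build();
                admin.createTable(desc); // returns once the procedure, like pid=4 above, completes
            }
        }
    }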
2024-11-26T10:26:25,707 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C40793%2C1732616773529.1732616785707 2024-11-26T10:26:25,717 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:26:25,718 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:26:25,718 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:26:25,718 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:26:25,718 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:26:25,718 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616774933 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616785707 2024-11-26T10:26:25,720 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40019:40019),(127.0.0.1/127.0.0.1:33163:33163)] 2024-11-26T10:26:25,720 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616774933 is not closed yet, will try archiving it next time 2024-11-26T10:26:25,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741833_1009 (size=451) 2024-11-26T10:26:25,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741833_1009 (size=451) 2024-11-26T10:26:25,729 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616774933 to hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/oldWALs/94eedbb855cf%2C40793%2C1732616773529.1732616774933 2024-11-26T10:26:25,730 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e., hostname=94eedbb855cf,40793,1732616773529, seqNum=2] 2024-11-26T10:26:37,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40793 {}] regionserver.HRegion(8855): Flush requested on 584534eef480da6884f3b883bc63829e 2024-11-26T10:26:37,767 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 584534eef480da6884f3b883bc63829e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-26T10:26:37,832 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/.tmp/info/825670b79098441ca6c984620dee27ef is 1080, key is row0001/info:/1732616785733/Put/seqid=0 2024-11-26T10:26:37,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741838_1014 (size=12509) 2024-11-26T10:26:37,848 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741838_1014 (size=12509) 2024-11-26T10:26:37,849 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/.tmp/info/825670b79098441ca6c984620dee27ef 2024-11-26T10:26:37,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/.tmp/info/825670b79098441ca6c984620dee27ef as hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/825670b79098441ca6c984620dee27ef 2024-11-26T10:26:37,923 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/825670b79098441ca6c984620dee27ef, entries=7, sequenceid=11, filesize=12.2 K 2024-11-26T10:26:37,933 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 584534eef480da6884f3b883bc63829e in 164ms, sequenceid=11, compaction requested=false 2024-11-26T10:26:37,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 584534eef480da6884f3b883bc63829e: 2024-11-26T10:26:41,924 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-26T10:26:45,776 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C40793%2C1732616773529.1732616805776 2024-11-26T10:26:45,985 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK], DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK]] 2024-11-26T10:26:45,985 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:26:45,985 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:26:45,985 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:26:45,985 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:26:45,986 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:26:45,986 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616785707 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616805776 2024-11-26T10:26:45,987 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33163:33163),(127.0.0.1/127.0.0.1:40019:40019)] 2024-11-26T10:26:45,987 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616785707 is not closed yet, will try archiving it next time 2024-11-26T10:26:45,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741837_1013 (size=12399) 2024-11-26T10:26:45,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741837_1013 (size=12399) 2024-11-26T10:26:46,190 INFO [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:26:48,394 INFO [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:26:50,598 INFO [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:26:52,802 INFO [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:26:52,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40793 {}] regionserver.HRegion(8855): Flush requested on 584534eef480da6884f3b883bc63829e 2024-11-26T10:26:52,802 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 584534eef480da6884f3b883bc63829e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-26T10:26:53,004 INFO [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:26:53,010 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/.tmp/info/46d4e1c6c54b47f79a0b70314d2aeb76 is 1080, key is row0008/info:/1732616799765/Put/seqid=0 2024-11-26T10:26:53,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741840_1016 (size=12509) 2024-11-26T10:26:53,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741840_1016 (size=12509) 2024-11-26T10:26:53,019 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/.tmp/info/46d4e1c6c54b47f79a0b70314d2aeb76 2024-11-26T10:26:53,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/.tmp/info/46d4e1c6c54b47f79a0b70314d2aeb76 as hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/46d4e1c6c54b47f79a0b70314d2aeb76 2024-11-26T10:26:53,039 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/46d4e1c6c54b47f79a0b70314d2aeb76, entries=7, sequenceid=21, filesize=12.2 K 2024-11-26T10:26:53,241 INFO [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:26:53,241 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 584534eef480da6884f3b883bc63829e in 
439ms, sequenceid=21, compaction requested=false 2024-11-26T10:26:53,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 584534eef480da6884f3b883bc63829e: 2024-11-26T10:26:53,242 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-26T10:26:53,242 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:26:53,242 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/825670b79098441ca6c984620dee27ef because midkey is the same as first or last row 2024-11-26T10:26:55,006 INFO [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:26:56,167 INFO [master/94eedbb855cf:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-26T10:26:56,167 INFO [master/94eedbb855cf:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-26T10:26:57,210 INFO [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:26:57,212 WARN [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:26:57,213 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 94eedbb855cf%2C40793%2C1732616773529:(num 1732616805776) roll requested 2024-11-26T10:26:57,214 INFO [regionserver/94eedbb855cf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C40793%2C1732616773529.1732616817214 2024-11-26T10:26:57,422 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:26:57,422 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:26:57,422 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:26:57,422 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:26:57,422 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:26:57,422 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-26T10:26:57,423 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616805776 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616817214 2024-11-26T10:26:57,424 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33163:33163),(127.0.0.1/127.0.0.1:40019:40019)] 2024-11-26T10:26:57,424 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616805776 is not closed yet, will try archiving it next time 2024-11-26T10:26:57,424 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616785707 to hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/oldWALs/94eedbb855cf%2C40793%2C1732616773529.1732616785707 2024-11-26T10:26:57,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741839_1015 (size=7739) 2024-11-26T10:26:57,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741839_1015 (size=7739) 2024-11-26T10:26:59,414 INFO [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:27:01,511 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 584534eef480da6884f3b883bc63829e, had cached 0 bytes from a total of 25018 2024-11-26T10:27:01,619 INFO [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:27:03,824 INFO [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:27:06,028 INFO [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], 
DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:27:08,030 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-26T10:27:08,031 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C40793%2C1732616773529.1732616828031 2024-11-26T10:27:11,924 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-26T10:27:13,044 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5008 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:27:13,047 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:27:13,047 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:13,047 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 94eedbb855cf%2C40793%2C1732616773529:(num 1732616828031) roll requested 2024-11-26T10:27:13,048 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:13,048 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:13,049 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:13,054 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:13,054 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616817214 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616828031 2024-11-26T10:27:13,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741841_1017 (size=4753) 2024-11-26T10:27:13,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741841_1017 (size=4753) 2024-11-26T10:27:13,061 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40019:40019),(127.0.0.1/127.0.0.1:33163:33163)] 2024-11-26T10:27:13,061 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616817214 is not closed yet, will try archiving it next time 2024-11-26T10:27:13,062 INFO [regionserver/94eedbb855cf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C40793%2C1732616773529.1732616833062 2024-11-26T10:27:18,065 INFO [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK], DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK]] 2024-11-26T10:27:18,066 WARN [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK], DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK]] 2024-11-26T10:27:18,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40793 {}] regionserver.HRegion(8855): Flush requested on 584534eef480da6884f3b883bc63829e 2024-11-26T10:27:18,066 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 584534eef480da6884f3b883bc63829e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-26T10:27:18,074 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK], DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK]] 2024-11-26T10:27:18,074 WARN [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK], DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK]] 2024-11-26T10:27:20,067 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-26T10:27:23,069 INFO [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK], DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK]] 2024-11-26T10:27:23,069 WARN [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK], DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK]] 2024-11-26T10:27:23,069 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:23,069 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:23,069 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:23,069 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:23,070 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:23,070 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616828031 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616833062 2024-11-26T10:27:23,071 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33163:33163),(127.0.0.1/127.0.0.1:40019:40019)] 2024-11-26T10:27:23,071 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616828031 is not closed yet, will try archiving it next time 2024-11-26T10:27:23,071 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 94eedbb855cf%2C40793%2C1732616773529:(num 1732616833062) roll requested 2024-11-26T10:27:23,072 INFO [regionserver/94eedbb855cf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C40793%2C1732616773529.1732616843071 2024-11-26T10:27:23,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/.tmp/info/a4cff5667b154c12be64ebdab4ae2e56 is 1080, key is row0015/info:/1732616814804/Put/seqid=0 2024-11-26T10:27:23,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741842_1018 (size=1569) 2024-11-26T10:27:23,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741842_1018 (size=1569) 2024-11-26T10:27:23,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741844_1020 (size=12509) 2024-11-26T10:27:23,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741844_1020 (size=12509) 2024-11-26T10:27:23,087 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/.tmp/info/a4cff5667b154c12be64ebdab4ae2e56 2024-11-26T10:27:23,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/.tmp/info/a4cff5667b154c12be64ebdab4ae2e56 as hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/a4cff5667b154c12be64ebdab4ae2e56 2024-11-26T10:27:23,106 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/a4cff5667b154c12be64ebdab4ae2e56, entries=7, sequenceid=31, filesize=12.2 K 2024-11-26T10:27:28,080 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:27:28,080 WARN [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:27:28,108 INFO [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:27:28,108 WARN [FSHLog-0-hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db-prefix:94eedbb855cf,40793,1732616773529 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-33479667-31e2-4cdb-89d5-f226c25374ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38569,DS-a4eca5da-f104-446e-9c68-c308706db69e,DISK]] 2024-11-26T10:27:28,109 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 584534eef480da6884f3b883bc63829e in 10042ms, sequenceid=31, compaction requested=true 2024-11-26T10:27:28,109 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:28,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 584534eef480da6884f3b883bc63829e: 2024-11-26T10:27:28,109 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:28,109 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:28,109 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-26T10:27:28,109 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:27:28,109 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:28,109 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/825670b79098441ca6c984620dee27ef because midkey is the same as first or last row 2024-11-26T10:27:28,109 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:28,109 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616833062 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616843071 2024-11-26T10:27:28,110 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:33163:33163),(127.0.0.1/127.0.0.1:40019:40019)] 2024-11-26T10:27:28,110 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616833062 is not closed yet, will try archiving it next time 2024-11-26T10:27:28,111 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616805776 to hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/oldWALs/94eedbb855cf%2C40793%2C1732616773529.1732616805776 2024-11-26T10:27:28,111 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 94eedbb855cf%2C40793%2C1732616773529:(num 1732616843071) roll requested 2024-11-26T10:27:28,111 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C40793%2C1732616773529.1732616848111 2024-11-26T10:27:28,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741843_1019 (size=438) 2024-11-26T10:27:28,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741843_1019 (size=438) 2024-11-26T10:27:28,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 584534eef480da6884f3b883bc63829e:info, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:27:28,114 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616817214 to hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/oldWALs/94eedbb855cf%2C40793%2C1732616773529.1732616817214 2024-11-26T10:27:28,115 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:27:28,115 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616828031 to hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/oldWALs/94eedbb855cf%2C40793%2C1732616773529.1732616828031 2024-11-26T10:27:28,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:27:28,118 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:27:28,120 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] regionserver.HStore(1541): 584534eef480da6884f3b883bc63829e/info is initiating minor compaction (all files) 2024-11-26T10:27:28,120 INFO [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 584534eef480da6884f3b883bc63829e/info in 
TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e. 2024-11-26T10:27:28,121 INFO [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/825670b79098441ca6c984620dee27ef, hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/46d4e1c6c54b47f79a0b70314d2aeb76, hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/a4cff5667b154c12be64ebdab4ae2e56] into tmpdir=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/.tmp, totalSize=36.6 K 2024-11-26T10:27:28,122 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] compactions.Compactor(225): Compacting 825670b79098441ca6c984620dee27ef, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732616785733 2024-11-26T10:27:28,123 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] compactions.Compactor(225): Compacting 46d4e1c6c54b47f79a0b70314d2aeb76, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732616799765 2024-11-26T10:27:28,124 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] compactions.Compactor(225): Compacting a4cff5667b154c12be64ebdab4ae2e56, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732616814804 2024-11-26T10:27:28,124 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:28,124 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:28,124 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:28,124 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:28,124 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:28,124 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616843071 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616848111 2024-11-26T10:27:28,125 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40019:40019),(127.0.0.1/127.0.0.1:33163:33163)] 2024-11-26T10:27:28,126 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616833062 is not closed yet, will try archiving it next time 2024-11-26T10:27:28,126 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616843071 is not closed yet, will try archiving it next time 2024-11-26T10:27:28,126 INFO [regionserver/94eedbb855cf:0.logRoller {}] monitor.StreamSlowMonitor(122): New 
stream slow monitor 94eedbb855cf%2C40793%2C1732616773529.1732616848126 2024-11-26T10:27:28,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741845_1021 (size=93) 2024-11-26T10:27:28,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741845_1021 (size=93) 2024-11-26T10:27:28,146 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:28,147 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:28,147 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:28,147 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:28,147 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:28,148 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616848111 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616848126 2024-11-26T10:27:28,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741846_1022 (size=1258) 2024-11-26T10:27:28,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741846_1022 (size=1258) 2024-11-26T10:27:28,154 DEBUG [Close-WAL-Writer-2 {}] wal.AbstractFSWAL(879): hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616833062 is not closed yet, will try archiving it next time 2024-11-26T10:27:28,154 DEBUG [Close-WAL-Writer-2 {}] wal.AbstractFSWAL(879): hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616843071 is not closed yet, will try archiving it next time 2024-11-26T10:27:28,164 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40019:40019),(127.0.0.1/127.0.0.1:33163:33163)] 2024-11-26T10:27:28,164 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616833062 is not closed yet, will try archiving it next time 2024-11-26T10:27:28,164 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616843071 is not closed yet, will try archiving it next time 2024-11-26T10:27:28,169 INFO [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 584534eef480da6884f3b883bc63829e#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:27:28,170 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/.tmp/info/464c9a432cd5494f9b6e53b52110ee13 is 1080, key is row0001/info:/1732616785733/Put/seqid=0 2024-11-26T10:27:28,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741848_1024 (size=27710) 2024-11-26T10:27:28,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741848_1024 (size=27710) 2024-11-26T10:27:28,187 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/.tmp/info/464c9a432cd5494f9b6e53b52110ee13 as hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/464c9a432cd5494f9b6e53b52110ee13 2024-11-26T10:27:28,203 INFO [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 584534eef480da6884f3b883bc63829e/info of 584534eef480da6884f3b883bc63829e into 464c9a432cd5494f9b6e53b52110ee13(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:27:28,204 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 584534eef480da6884f3b883bc63829e: 2024-11-26T10:27:28,205 INFO [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e., storeName=584534eef480da6884f3b883bc63829e/info, priority=13, startTime=1732616848110; duration=0sec 2024-11-26T10:27:28,205 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-26T10:27:28,206 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:27:28,206 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/464c9a432cd5494f9b6e53b52110ee13 because midkey is the same as first or last row 2024-11-26T10:27:28,206 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-26T10:27:28,206 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:27:28,206 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/464c9a432cd5494f9b6e53b52110ee13 because midkey is the same as first or last row 2024-11-26T10:27:28,206 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-26T10:27:28,206 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:27:28,206 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/464c9a432cd5494f9b6e53b52110ee13 because midkey is the same as first or last row 2024-11-26T10:27:28,206 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:27:28,206 DEBUG [RS:0;94eedbb855cf:40793-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 584534eef480da6884f3b883bc63829e:info 2024-11-26T10:27:28,513 DEBUG [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(879): hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616843071 is not closed yet, will try archiving it next time 2024-11-26T10:27:28,514 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616833062 to hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/oldWALs/94eedbb855cf%2C40793%2C1732616773529.1732616833062 2024-11-26T10:27:28,530 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/WALs/94eedbb855cf,40793,1732616773529/94eedbb855cf%2C40793%2C1732616773529.1732616843071 to hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/oldWALs/94eedbb855cf%2C40793%2C1732616773529.1732616843071 2024-11-26T10:27:40,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40793 {}] regionserver.HRegion(8855): Flush requested on 584534eef480da6884f3b883bc63829e 2024-11-26T10:27:40,151 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 584534eef480da6884f3b883bc63829e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-26T10:27:40,158 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/.tmp/info/daa729bb84b44bdda71c49b8fe526fd3 is 1080, key is row0022/info:/1732616848127/Put/seqid=0 2024-11-26T10:27:40,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741849_1025 (size=12509) 2024-11-26T10:27:40,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to 
blk_1073741849_1025 (size=12509) 2024-11-26T10:27:40,166 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/.tmp/info/daa729bb84b44bdda71c49b8fe526fd3 2024-11-26T10:27:40,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/.tmp/info/daa729bb84b44bdda71c49b8fe526fd3 as hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/daa729bb84b44bdda71c49b8fe526fd3 2024-11-26T10:27:40,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/daa729bb84b44bdda71c49b8fe526fd3, entries=7, sequenceid=42, filesize=12.2 K 2024-11-26T10:27:40,185 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 584534eef480da6884f3b883bc63829e in 34ms, sequenceid=42, compaction requested=false 2024-11-26T10:27:40,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 584534eef480da6884f3b883bc63829e: 2024-11-26T10:27:40,185 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-26T10:27:40,185 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:27:40,186 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/464c9a432cd5494f9b6e53b52110ee13 because midkey is the same as first or last row 2024-11-26T10:27:41,925 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-26T10:27:46,512 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 584534eef480da6884f3b883bc63829e, had cached 0 bytes from a total of 40219 2024-11-26T10:27:48,163 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-26T10:27:48,163 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-26T10:27:48,164 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:27:48,168 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:27:48,169 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:27:48,169 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-26T10:27:48,169 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-26T10:27:48,169 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2135136293, stopped=false 2024-11-26T10:27:48,170 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=94eedbb855cf,42131,1732616772849 2024-11-26T10:27:48,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-26T10:27:48,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-26T10:27:48,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:48,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:48,172 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-26T10:27:48,172 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-26T10:27:48,172 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:27:48,173 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:27:48,173 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:27:48,173 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:27:48,173 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '94eedbb855cf,40793,1732616773529' ***** 2024-11-26T10:27:48,173 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-26T10:27:48,173 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-26T10:27:48,174 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-26T10:27:48,174 INFO [RS:0;94eedbb855cf:40793 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-26T10:27:48,174 INFO [RS:0;94eedbb855cf:40793 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-26T10:27:48,174 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(3091): Received CLOSE for 584534eef480da6884f3b883bc63829e 2024-11-26T10:27:48,175 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(959): stopping server 94eedbb855cf,40793,1732616773529 2024-11-26T10:27:48,175 INFO [RS:0;94eedbb855cf:40793 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-26T10:27:48,175 INFO [RS:0;94eedbb855cf:40793 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;94eedbb855cf:40793. 
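The connection-close call stacks above all bottom out in AbstractTestLogRolling.tearDown driving HBaseTestingUtil.shutdownMiniCluster (via JUnit's RunAfters), which is what closes the AsyncConnectionImpl instances and stops the RPC clients logged here. As a rough sketch only — shutdownMiniCluster appears in the stacks, but the class name, no-arg constructor and startMiniCluster counterpart below are assumptions, not copied from AbstractTestLogRolling:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLogRollingSketch {
  // Shared test helper; HBaseTestingUtil is the utility class visible in the stack traces above.
  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Assumed counterpart to shutdownMiniCluster(): starts ZK, HDFS and a single
    // master + region server, matching the one-of-each topology seen in this run.
    TEST_UTIL.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Matches the HBaseTestingUtil.shutdownMiniCluster frame in the stacks above:
    // closes the cluster connection, stops master and region server, then HDFS and ZK.
    TEST_UTIL.shutdownMiniCluster();
  }
}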
2024-11-26T10:27:48,175 DEBUG [RS:0;94eedbb855cf:40793 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:27:48,175 DEBUG [RS:0;94eedbb855cf:40793 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:27:48,175 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 584534eef480da6884f3b883bc63829e, disabling compactions & flushes 2024-11-26T10:27:48,175 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-26T10:27:48,175 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e. 2024-11-26T10:27:48,175 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-26T10:27:48,175 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e. 2024-11-26T10:27:48,175 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-26T10:27:48,175 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e. after waiting 0 ms 2024-11-26T10:27:48,176 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e. 
2024-11-26T10:27:48,176 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-26T10:27:48,176 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 584534eef480da6884f3b883bc63829e 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-26T10:27:48,176 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-26T10:27:48,176 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-26T10:27:48,176 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-26T10:27:48,176 DEBUG [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(1325): Online Regions={584534eef480da6884f3b883bc63829e=TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e., 1588230740=hbase:meta,,1.1588230740} 2024-11-26T10:27:48,176 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-26T10:27:48,176 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-26T10:27:48,176 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-26T10:27:48,177 DEBUG [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 584534eef480da6884f3b883bc63829e 2024-11-26T10:27:48,177 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-26T10:27:48,182 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/.tmp/info/63e8c2338ca649218dfde59023600c1f is 1080, key is row0029/info:/1732616862153/Put/seqid=0 2024-11-26T10:27:48,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741850_1026 (size=8193) 2024-11-26T10:27:48,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741850_1026 (size=8193) 2024-11-26T10:27:48,194 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/.tmp/info/63e8c2338ca649218dfde59023600c1f 2024-11-26T10:27:48,202 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740/.tmp/info/a68413d6a8a84b0badc148677ed8f97b is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e./info:regioninfo/1732616776543/Put/seqid=0 2024-11-26T10:27:48,204 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/.tmp/info/63e8c2338ca649218dfde59023600c1f as hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/63e8c2338ca649218dfde59023600c1f 2024-11-26T10:27:48,212 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/63e8c2338ca649218dfde59023600c1f, entries=3, sequenceid=48, filesize=8.0 K 2024-11-26T10:27:48,214 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 584534eef480da6884f3b883bc63829e in 37ms, sequenceid=48, compaction requested=true 2024-11-26T10:27:48,214 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/825670b79098441ca6c984620dee27ef, hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/46d4e1c6c54b47f79a0b70314d2aeb76, hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/a4cff5667b154c12be64ebdab4ae2e56] to archive 2024-11-26T10:27:48,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741851_1027 (size=7016) 2024-11-26T10:27:48,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741851_1027 (size=7016) 2024-11-26T10:27:48,217 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740/.tmp/info/a68413d6a8a84b0badc148677ed8f97b 2024-11-26T10:27:48,218 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-26T10:27:48,221 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/825670b79098441ca6c984620dee27ef to hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/archive/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/825670b79098441ca6c984620dee27ef 2024-11-26T10:27:48,223 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/46d4e1c6c54b47f79a0b70314d2aeb76 to hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/archive/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/46d4e1c6c54b47f79a0b70314d2aeb76 2024-11-26T10:27:48,225 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/a4cff5667b154c12be64ebdab4ae2e56 to hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/archive/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/info/a4cff5667b154c12be64ebdab4ae2e56 2024-11-26T10:27:48,237 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=94eedbb855cf:42131 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-26T10:27:48,238 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [825670b79098441ca6c984620dee27ef=12509, 46d4e1c6c54b47f79a0b70314d2aeb76=12509, a4cff5667b154c12be64ebdab4ae2e56=12509] 2024-11-26T10:27:48,244 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/default/TestLogRolling-testSlowSyncLogRolling/584534eef480da6884f3b883bc63829e/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-26T10:27:48,247 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e. 2024-11-26T10:27:48,247 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 584534eef480da6884f3b883bc63829e: Waiting for close lock at 1732616868175Running coprocessor pre-close hooks at 1732616868175Disabling compacts and flushes for region at 1732616868175Disabling writes for close at 1732616868176 (+1 ms)Obtaining lock to block concurrent updates at 1732616868176Preparing flush snapshotting stores in 584534eef480da6884f3b883bc63829e at 1732616868176Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732616868176Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e. at 1732616868177 (+1 ms)Flushing 584534eef480da6884f3b883bc63829e/info: creating writer at 1732616868178 (+1 ms)Flushing 584534eef480da6884f3b883bc63829e/info: appending metadata at 1732616868182 (+4 ms)Flushing 584534eef480da6884f3b883bc63829e/info: closing flushed file at 1732616868182Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b8e2740: reopening flushed file at 1732616868203 (+21 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 584534eef480da6884f3b883bc63829e in 37ms, sequenceid=48, compaction requested=true at 1732616868214 (+11 ms)Writing region close event to WAL at 1732616868239 (+25 ms)Running coprocessor post-close hooks at 1732616868245 (+6 ms)Closed at 1732616868247 (+2 ms) 2024-11-26T10:27:48,248 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732616775637.584534eef480da6884f3b883bc63829e. 
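The close journal above shows the region doing one final memstore flush (~3.15 KB at sequenceid=48) before the close lock is released. For comparison, the same kind of flush can be requested explicitly through the Admin API while the cluster is still running; a minimal sketch, assuming a getConnection() accessor on the test utility (the flush(TableName) call itself is the standard Admin method):

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class FlushSketch {
  // Forces a memstore flush of the test table while the mini cluster is up,
  // mirroring the flush the close path performed automatically in the journal above.
  // util.getConnection() is assumed to exist on HBaseTestingUtil.
  static void flushTestTable(HBaseTestingUtil util) throws IOException {
    try (Admin admin = util.getConnection().getAdmin()) {
      admin.flush(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"));
    }
  }
}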
2024-11-26T10:27:48,248 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740/.tmp/ns/da56b57be05e4e24819d729b68f89ce9 is 43, key is default/ns:d/1732616775422/Put/seqid=0 2024-11-26T10:27:48,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741852_1028 (size=5153) 2024-11-26T10:27:48,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741852_1028 (size=5153) 2024-11-26T10:27:48,256 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740/.tmp/ns/da56b57be05e4e24819d729b68f89ce9 2024-11-26T10:27:48,281 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740/.tmp/table/10cbcf25c5884e8d83743b90ca57d15a is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732616776563/Put/seqid=0 2024-11-26T10:27:48,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741853_1029 (size=5396) 2024-11-26T10:27:48,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741853_1029 (size=5396) 2024-11-26T10:27:48,287 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740/.tmp/table/10cbcf25c5884e8d83743b90ca57d15a 2024-11-26T10:27:48,295 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740/.tmp/info/a68413d6a8a84b0badc148677ed8f97b as hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740/info/a68413d6a8a84b0badc148677ed8f97b 2024-11-26T10:27:48,302 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740/info/a68413d6a8a84b0badc148677ed8f97b, entries=10, sequenceid=11, filesize=6.9 K 2024-11-26T10:27:48,304 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740/.tmp/ns/da56b57be05e4e24819d729b68f89ce9 as hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740/ns/da56b57be05e4e24819d729b68f89ce9 2024-11-26T10:27:48,311 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740/ns/da56b57be05e4e24819d729b68f89ce9, entries=2, sequenceid=11, filesize=5.0 K 2024-11-26T10:27:48,312 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740/.tmp/table/10cbcf25c5884e8d83743b90ca57d15a as hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740/table/10cbcf25c5884e8d83743b90ca57d15a 2024-11-26T10:27:48,319 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740/table/10cbcf25c5884e8d83743b90ca57d15a, entries=2, sequenceid=11, filesize=5.3 K 2024-11-26T10:27:48,320 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 144ms, sequenceid=11, compaction requested=false 2024-11-26T10:27:48,325 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-26T10:27:48,326 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-26T10:27:48,326 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-26T10:27:48,326 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732616868176Running coprocessor pre-close hooks at 1732616868176Disabling compacts and flushes for region at 1732616868176Disabling writes for close at 1732616868176Obtaining lock to block concurrent updates at 1732616868177 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732616868177Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732616868177Flushing stores of hbase:meta,,1.1588230740 at 1732616868178 (+1 ms)Flushing 1588230740/info: creating writer at 1732616868178Flushing 1588230740/info: appending metadata at 1732616868201 (+23 ms)Flushing 1588230740/info: closing flushed file at 1732616868201Flushing 1588230740/ns: creating writer at 1732616868225 (+24 ms)Flushing 1588230740/ns: appending metadata at 1732616868248 (+23 ms)Flushing 1588230740/ns: closing flushed file at 1732616868248Flushing 1588230740/table: creating writer at 1732616868265 (+17 ms)Flushing 1588230740/table: appending metadata at 1732616868280 (+15 ms)Flushing 1588230740/table: closing flushed file at 1732616868280Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e008fe6: reopening flushed file at 1732616868294 (+14 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7652056a: reopening flushed file at 1732616868303 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@740056b7: reopening flushed file at 1732616868311 (+8 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 144ms, sequenceid=11, compaction requested=false at 1732616868320 (+9 ms)Writing region close event to WAL at 1732616868321 (+1 ms)Running coprocessor post-close hooks at 1732616868326 (+5 ms)Closed at 1732616868326 2024-11-26T10:27:48,327 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-26T10:27:48,377 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(976): stopping server 94eedbb855cf,40793,1732616773529; all regions closed. 2024-11-26T10:27:48,378 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:48,379 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:48,379 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:48,379 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:48,379 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:48,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741834_1010 (size=3066) 2024-11-26T10:27:48,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741834_1010 (size=3066) 2024-11-26T10:27:48,385 DEBUG [RS:0;94eedbb855cf:40793 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/oldWALs 2024-11-26T10:27:48,385 INFO [RS:0;94eedbb855cf:40793 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 94eedbb855cf%2C40793%2C1732616773529.meta:.meta(num 1732616775272) 2024-11-26T10:27:48,385 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:48,385 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:48,386 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:48,386 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:48,386 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:48,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741847_1023 (size=12695) 2024-11-26T10:27:48,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741847_1023 (size=12695) 2024-11-26T10:27:48,391 DEBUG [RS:0;94eedbb855cf:40793 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/oldWALs 2024-11-26T10:27:48,391 INFO [RS:0;94eedbb855cf:40793 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 94eedbb855cf%2C40793%2C1732616773529:(num 1732616848126) 2024-11-26T10:27:48,391 DEBUG [RS:0;94eedbb855cf:40793 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:27:48,391 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.LeaseManager(133): Closed leases 2024-11-26T10:27:48,391 INFO [RS:0;94eedbb855cf:40793 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-26T10:27:48,392 INFO [RS:0;94eedbb855cf:40793 {}] hbase.ChoreService(370): Chore service for: regionserver/94eedbb855cf:0 had 
[ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-26T10:27:48,392 INFO [RS:0;94eedbb855cf:40793 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-26T10:27:48,392 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-26T10:27:48,392 INFO [RS:0;94eedbb855cf:40793 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40793 2024-11-26T10:27:48,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/94eedbb855cf,40793,1732616773529 2024-11-26T10:27:48,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-26T10:27:48,396 INFO [RS:0;94eedbb855cf:40793 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-26T10:27:48,399 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [94eedbb855cf,40793,1732616773529] 2024-11-26T10:27:48,401 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/94eedbb855cf,40793,1732616773529 already deleted, retry=false 2024-11-26T10:27:48,401 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 94eedbb855cf,40793,1732616773529 expired; onlineServers=0 2024-11-26T10:27:48,401 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '94eedbb855cf,42131,1732616772849' ***** 2024-11-26T10:27:48,401 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-26T10:27:48,401 INFO [M:0;94eedbb855cf:42131 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-26T10:27:48,401 INFO [M:0;94eedbb855cf:42131 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-26T10:27:48,401 DEBUG [M:0;94eedbb855cf:42131 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-26T10:27:48,402 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
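Before the LogRoller exits as logged above, the WALs have already been closed and the rolled-off files moved to .../oldWALs. In a log-rolling test a roll can also be triggered explicitly via Admin.rollWALWriter; a sketch under the same assumptions as the snippets above, with the mini-cluster and region-server accessors assumed (modeled on the older HBaseTestingUtility API, not confirmed by this log):

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;

public final class WalRollSketch {
  // Asks the lone region server of the mini cluster to roll its WAL; rolled-off
  // WAL files are what later get archived to .../oldWALs as in the messages above.
  // getMiniHBaseCluster()/getRegionServer(0)/getConnection() are assumed accessors.
  static void rollWal(HBaseTestingUtil util) throws IOException {
    ServerName rs = util.getMiniHBaseCluster().getRegionServer(0).getServerName();
    try (Admin admin = util.getConnection().getAdmin()) {
      admin.rollWALWriter(rs);
    }
  }
}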
2024-11-26T10:27:48,402 DEBUG [M:0;94eedbb855cf:42131 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-26T10:27:48,402 DEBUG [master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732616774556 {}] cleaner.HFileCleaner(306): Exit Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732616774556,5,FailOnTimeoutGroup] 2024-11-26T10:27:48,402 DEBUG [master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732616774555 {}] cleaner.HFileCleaner(306): Exit Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732616774555,5,FailOnTimeoutGroup] 2024-11-26T10:27:48,402 INFO [M:0;94eedbb855cf:42131 {}] hbase.ChoreService(370): Chore service for: master/94eedbb855cf:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-26T10:27:48,402 INFO [M:0;94eedbb855cf:42131 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-26T10:27:48,402 DEBUG [M:0;94eedbb855cf:42131 {}] master.HMaster(1795): Stopping service threads 2024-11-26T10:27:48,402 INFO [M:0;94eedbb855cf:42131 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-26T10:27:48,402 INFO [M:0;94eedbb855cf:42131 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-26T10:27:48,402 INFO [M:0;94eedbb855cf:42131 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-26T10:27:48,403 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-26T10:27:48,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-26T10:27:48,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:48,403 DEBUG [M:0;94eedbb855cf:42131 {}] zookeeper.ZKUtil(347): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-26T10:27:48,404 WARN [M:0;94eedbb855cf:42131 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-26T10:27:48,404 INFO [M:0;94eedbb855cf:42131 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/.lastflushedseqids 2024-11-26T10:27:48,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741854_1030 (size=130) 2024-11-26T10:27:48,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741854_1030 (size=130) 2024-11-26T10:27:48,419 INFO [M:0;94eedbb855cf:42131 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-26T10:27:48,419 INFO [M:0;94eedbb855cf:42131 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-26T10:27:48,419 DEBUG [M:0;94eedbb855cf:42131 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-26T10:27:48,419 INFO [M:0;94eedbb855cf:42131 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:27:48,419 DEBUG [M:0;94eedbb855cf:42131 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:27:48,419 DEBUG [M:0;94eedbb855cf:42131 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-26T10:27:48,419 DEBUG [M:0;94eedbb855cf:42131 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:27:48,420 INFO [M:0;94eedbb855cf:42131 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-26T10:27:48,438 DEBUG [M:0;94eedbb855cf:42131 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e7dc25192ba14b4683b45183a267b8dc is 82, key is hbase:meta,,1/info:regioninfo/1732616775347/Put/seqid=0 2024-11-26T10:27:48,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741855_1031 (size=5672) 2024-11-26T10:27:48,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741855_1031 (size=5672) 2024-11-26T10:27:48,445 INFO [M:0;94eedbb855cf:42131 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e7dc25192ba14b4683b45183a267b8dc 2024-11-26T10:27:48,469 DEBUG [M:0;94eedbb855cf:42131 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/704b50b80f824aa08825d153519b17f3 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732616776570/Put/seqid=0 2024-11-26T10:27:48,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741856_1032 (size=6247) 2024-11-26T10:27:48,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741856_1032 (size=6247) 2024-11-26T10:27:48,476 INFO [M:0;94eedbb855cf:42131 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/704b50b80f824aa08825d153519b17f3 2024-11-26T10:27:48,482 INFO [M:0;94eedbb855cf:42131 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 704b50b80f824aa08825d153519b17f3 2024-11-26T10:27:48,498 DEBUG [M:0;94eedbb855cf:42131 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3e20a49f1feb40c0b6c8a765e37ef65b is 69, key is 94eedbb855cf,40793,1732616773529/rs:state/1732616774708/Put/seqid=0 2024-11-26T10:27:48,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:27:48,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40793-0x10153cfc42e0001, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:27:48,500 INFO [RS:0;94eedbb855cf:40793 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-26T10:27:48,500 INFO [RS:0;94eedbb855cf:40793 {}] regionserver.HRegionServer(1031): Exiting; stopping=94eedbb855cf,40793,1732616773529; zookeeper connection closed. 2024-11-26T10:27:48,501 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@9a92020 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@9a92020 2024-11-26T10:27:48,502 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-26T10:27:48,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741857_1033 (size=5156) 2024-11-26T10:27:48,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741857_1033 (size=5156) 2024-11-26T10:27:48,505 INFO [M:0;94eedbb855cf:42131 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3e20a49f1feb40c0b6c8a765e37ef65b 2024-11-26T10:27:48,527 DEBUG [M:0;94eedbb855cf:42131 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2279034e554b4962830385b9622a10a4 is 52, key is load_balancer_on/state:d/1732616775596/Put/seqid=0 2024-11-26T10:27:48,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741858_1034 (size=5056) 2024-11-26T10:27:48,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741858_1034 (size=5056) 2024-11-26T10:27:48,781 INFO [regionserver/94eedbb855cf:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-26T10:27:48,940 INFO [M:0;94eedbb855cf:42131 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2279034e554b4962830385b9622a10a4 2024-11-26T10:27:48,948 DEBUG [M:0;94eedbb855cf:42131 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e7dc25192ba14b4683b45183a267b8dc as hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e7dc25192ba14b4683b45183a267b8dc 2024-11-26T10:27:48,954 INFO [M:0;94eedbb855cf:42131 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e7dc25192ba14b4683b45183a267b8dc, entries=8, sequenceid=59, filesize=5.5 K 2024-11-26T10:27:48,955 DEBUG [M:0;94eedbb855cf:42131 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/704b50b80f824aa08825d153519b17f3 as hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/704b50b80f824aa08825d153519b17f3 2024-11-26T10:27:48,961 INFO [M:0;94eedbb855cf:42131 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 704b50b80f824aa08825d153519b17f3 2024-11-26T10:27:48,961 INFO [M:0;94eedbb855cf:42131 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/704b50b80f824aa08825d153519b17f3, entries=6, sequenceid=59, filesize=6.1 K 2024-11-26T10:27:48,962 DEBUG [M:0;94eedbb855cf:42131 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3e20a49f1feb40c0b6c8a765e37ef65b as hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3e20a49f1feb40c0b6c8a765e37ef65b 2024-11-26T10:27:48,968 INFO [M:0;94eedbb855cf:42131 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3e20a49f1feb40c0b6c8a765e37ef65b, entries=1, sequenceid=59, filesize=5.0 K 2024-11-26T10:27:48,969 DEBUG [M:0;94eedbb855cf:42131 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2279034e554b4962830385b9622a10a4 as hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2279034e554b4962830385b9622a10a4 2024-11-26T10:27:48,975 INFO [M:0;94eedbb855cf:42131 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2279034e554b4962830385b9622a10a4, entries=1, sequenceid=59, filesize=4.9 K 2024-11-26T10:27:48,976 INFO [M:0;94eedbb855cf:42131 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 557ms, 
sequenceid=59, compaction requested=false 2024-11-26T10:27:48,978 INFO [M:0;94eedbb855cf:42131 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:27:48,978 DEBUG [M:0;94eedbb855cf:42131 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732616868419Disabling compacts and flushes for region at 1732616868419Disabling writes for close at 1732616868419Obtaining lock to block concurrent updates at 1732616868420 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732616868420Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1732616868420Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732616868421 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732616868421Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732616868437 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732616868437Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732616868453 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732616868469 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732616868469Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732616868482 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732616868497 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732616868497Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732616868512 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732616868526 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732616868526Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5856ac2c: reopening flushed file at 1732616868947 (+421 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2ce6069: reopening flushed file at 1732616868954 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17cc0cff: reopening flushed file at 1732616868961 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20187522: reopening flushed file at 1732616868968 (+7 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 557ms, sequenceid=59, compaction requested=false at 1732616868976 (+8 ms)Writing region close event to WAL at 1732616868978 (+2 ms)Closed at 1732616868978 2024-11-26T10:27:48,979 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:48,979 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:48,979 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:48,979 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:48,979 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:48,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38569 is added to blk_1073741830_1006 (size=27973) 2024-11-26T10:27:48,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741830_1006 (size=27973) 2024-11-26T10:27:48,983 
INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-26T10:27:48,983 INFO [M:0;94eedbb855cf:42131 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-26T10:27:48,983 INFO [M:0;94eedbb855cf:42131 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42131 2024-11-26T10:27:48,984 INFO [M:0;94eedbb855cf:42131 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-26T10:27:49,086 INFO [M:0;94eedbb855cf:42131 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-26T10:27:49,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:27:49,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42131-0x10153cfc42e0000, quorum=127.0.0.1:54085, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:27:49,091 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:27:49,093 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:27:49,093 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:27:49,094 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:27:49,094 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/hadoop.log.dir/,STOPPED} 2024-11-26T10:27:49,097 WARN [BP-1555577945-172.17.0.2-1732616769980 heartbeating to localhost/127.0.0.1:33735 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:27:49,097 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-26T10:27:49,097 WARN [BP-1555577945-172.17.0.2-1732616769980 heartbeating to localhost/127.0.0.1:33735 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1555577945-172.17.0.2-1732616769980 (Datanode Uuid 830c34dd-6958-435e-a93e-d9248c2a42de) service to localhost/127.0.0.1:33735 2024-11-26T10:27:49,097 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:27:49,098 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/cluster_c435eb50-1452-06a4-150b-a0c3ff2e87f0/data/data3/current/BP-1555577945-172.17.0.2-1732616769980 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:27:49,098 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/cluster_c435eb50-1452-06a4-150b-a0c3ff2e87f0/data/data4/current/BP-1555577945-172.17.0.2-1732616769980 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:27:49,099 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:27:49,104 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:27:49,105 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:27:49,105 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:27:49,105 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:27:49,105 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/hadoop.log.dir/,STOPPED} 2024-11-26T10:27:49,107 WARN [BP-1555577945-172.17.0.2-1732616769980 heartbeating to localhost/127.0.0.1:33735 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:27:49,107 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-26T10:27:49,107 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:27:49,107 WARN [BP-1555577945-172.17.0.2-1732616769980 heartbeating to localhost/127.0.0.1:33735 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1555577945-172.17.0.2-1732616769980 (Datanode Uuid 993adc71-1bd2-4677-a666-3f17729b3704) service to localhost/127.0.0.1:33735 2024-11-26T10:27:49,107 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/cluster_c435eb50-1452-06a4-150b-a0c3ff2e87f0/data/data1/current/BP-1555577945-172.17.0.2-1732616769980 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:27:49,108 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/cluster_c435eb50-1452-06a4-150b-a0c3ff2e87f0/data/data2/current/BP-1555577945-172.17.0.2-1732616769980 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:27:49,108 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:27:49,120 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-26T10:27:49,120 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:27:49,120 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:27:49,121 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:27:49,121 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/hadoop.log.dir/,STOPPED} 2024-11-26T10:27:49,135 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-26T10:27:49,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-26T10:27:49,181 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=80 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33735 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33735 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/94eedbb855cf:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33735 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@53abf30 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:33735 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33735 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33735 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) 
app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/94eedbb855cf:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33735 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33735 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/94eedbb855cf:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=121 (was 277), ProcessCount=11 (was 11), AvailableMemoryMB=7326 (was 7194) - AvailableMemoryMB LEAK? 
- 2024-11-26T10:27:49,190 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=81, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=121, ProcessCount=11, AvailableMemoryMB=7326 2024-11-26T10:27:49,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-26T10:27:49,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/hadoop.log.dir so I do NOT create it in target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001 2024-11-26T10:27:49,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6b76334-1149-bed0-0c92-ecd896c5c175/hadoop.tmp.dir so I do NOT create it in target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001 2024-11-26T10:27:49,191 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/cluster_ebbb2e29-b0be-4666-8cad-1674355cfe10, deleteOnExit=true 2024-11-26T10:27:49,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-26T10:27:49,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/test.cache.data in system properties and HBase conf 2024-11-26T10:27:49,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/hadoop.tmp.dir in system properties and HBase conf 2024-11-26T10:27:49,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/hadoop.log.dir in system properties and HBase conf 2024-11-26T10:27:49,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-26T10:27:49,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-26T10:27:49,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-26T10:27:49,192 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-26T10:27:49,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-26T10:27:49,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-26T10:27:49,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-26T10:27:49,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-26T10:27:49,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-26T10:27:49,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-26T10:27:49,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-26T10:27:49,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-26T10:27:49,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-26T10:27:49,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/nfs.dump.dir in system properties and HBase conf 2024-11-26T10:27:49,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/java.io.tmpdir in system properties and HBase conf 2024-11-26T10:27:49,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-26T10:27:49,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-26T10:27:49,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-26T10:27:49,210 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-26T10:27:49,298 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:27:49,305 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:27:49,306 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:27:49,306 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:27:49,306 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-26T10:27:49,306 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:27:49,307 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a3c3ceb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:27:49,307 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a69944b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:27:49,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3235d5ba{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/java.io.tmpdir/jetty-localhost-35943-hadoop-hdfs-3_4_1-tests_jar-_-any-6698250916971182399/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-26T10:27:49,433 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@347af0d{HTTP/1.1, (http/1.1)}{localhost:35943} 2024-11-26T10:27:49,433 INFO [Time-limited test {}] server.Server(415): Started @101580ms 2024-11-26T10:27:49,447 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-26T10:27:49,584 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:27:49,588 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:27:49,588 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:27:49,588 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:27:49,589 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-26T10:27:49,589 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@607b9bc6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:27:49,590 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@670e4080{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:27:49,705 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@61e52b83{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/java.io.tmpdir/jetty-localhost-36273-hadoop-hdfs-3_4_1-tests_jar-_-any-13252209510586866100/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:27:49,706 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@29a18ee0{HTTP/1.1, (http/1.1)}{localhost:36273} 2024-11-26T10:27:49,706 INFO [Time-limited test {}] server.Server(415): Started @101853ms 2024-11-26T10:27:49,708 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-26T10:27:49,743 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:27:49,747 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:27:49,748 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:27:49,748 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:27:49,748 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-26T10:27:49,748 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d944f53{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:27:49,749 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18f27499{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:27:49,816 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/cluster_ebbb2e29-b0be-4666-8cad-1674355cfe10/data/data1/current/BP-1530044533-172.17.0.2-1732616869231/current, will proceed with Du for space computation calculation, 2024-11-26T10:27:49,817 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/cluster_ebbb2e29-b0be-4666-8cad-1674355cfe10/data/data2/current/BP-1530044533-172.17.0.2-1732616869231/current, will proceed with Du for space computation calculation, 2024-11-26T10:27:49,838 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-26T10:27:49,841 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe4adac9a3e562c17 with lease ID 0x23131a60342c3aa5: Processing first storage report for DS-f324cf84-67de-408a-815e-e97d9fde0203 from datanode DatanodeRegistration(127.0.0.1:35401, datanodeUuid=bd94ebe6-b444-49e1-ae1c-348940b8f28d, infoPort=40851, infoSecurePort=0, ipcPort=34595, storageInfo=lv=-57;cid=testClusterID;nsid=1044134892;c=1732616869231) 2024-11-26T10:27:49,841 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4adac9a3e562c17 with lease ID 0x23131a60342c3aa5: from storage DS-f324cf84-67de-408a-815e-e97d9fde0203 node DatanodeRegistration(127.0.0.1:35401, datanodeUuid=bd94ebe6-b444-49e1-ae1c-348940b8f28d, infoPort=40851, infoSecurePort=0, ipcPort=34595, storageInfo=lv=-57;cid=testClusterID;nsid=1044134892;c=1732616869231), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:27:49,841 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe4adac9a3e562c17 with lease ID 0x23131a60342c3aa5: Processing first storage report for DS-1f2c028a-6a7e-490c-ab71-684fc32ca5d2 from datanode DatanodeRegistration(127.0.0.1:35401, datanodeUuid=bd94ebe6-b444-49e1-ae1c-348940b8f28d, infoPort=40851, infoSecurePort=0, ipcPort=34595, storageInfo=lv=-57;cid=testClusterID;nsid=1044134892;c=1732616869231) 2024-11-26T10:27:49,841 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4adac9a3e562c17 with lease ID 0x23131a60342c3aa5: from storage DS-1f2c028a-6a7e-490c-ab71-684fc32ca5d2 node DatanodeRegistration(127.0.0.1:35401, datanodeUuid=bd94ebe6-b444-49e1-ae1c-348940b8f28d, infoPort=40851, infoSecurePort=0, ipcPort=34595, storageInfo=lv=-57;cid=testClusterID;nsid=1044134892;c=1732616869231), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:27:49,869 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7cebd4b6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/java.io.tmpdir/jetty-localhost-39087-hadoop-hdfs-3_4_1-tests_jar-_-any-14187795735613534169/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:27:49,870 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e6ff2f3{HTTP/1.1, (http/1.1)}{localhost:39087} 2024-11-26T10:27:49,870 INFO [Time-limited test {}] server.Server(415): Started @102017ms 2024-11-26T10:27:49,872 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
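
The entries up to this point show HBaseTestingUtil bringing up the test topology for TestLogRolling#testLogRollOnDatanodeDeath: one master, one regionserver, two datanodes and one ZooKeeper server, with both datanodes now registered and reporting storage. As an editor's illustrative sketch only (assuming the branch-3 HBaseTestingUtil / StartMiniClusterOption builder API whose toString appears in the log; exact method names may differ slightly on this branch), a test typically requests this topology like so:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterStartupSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();

        // Mirrors the option printed in the log: 1 master, 1 regionserver,
        // 2 datanodes, 1 ZooKeeper server, no pre-created root/WAL dirs.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();

        util.startMiniCluster(option);   // brings up mini DFS, ZK, master and regionserver
        try {
          // ... test body, e.g. write WAL edits and take a datanode down ...
        } finally {
          util.shutdownMiniCluster();    // tears everything down and removes the test dirs
        }
      }
    }
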
2024-11-26T10:27:50,015 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/cluster_ebbb2e29-b0be-4666-8cad-1674355cfe10/data/data3/current/BP-1530044533-172.17.0.2-1732616869231/current, will proceed with Du for space computation calculation, 2024-11-26T10:27:50,015 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/cluster_ebbb2e29-b0be-4666-8cad-1674355cfe10/data/data4/current/BP-1530044533-172.17.0.2-1732616869231/current, will proceed with Du for space computation calculation, 2024-11-26T10:27:50,038 WARN [Thread-452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-26T10:27:50,041 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9201e8c772a23004 with lease ID 0x23131a60342c3aa6: Processing first storage report for DS-70ab907b-7694-4394-9d34-747d8265c51b from datanode DatanodeRegistration(127.0.0.1:39131, datanodeUuid=0af9844e-698d-4a3f-9cb8-3ea097e19dc9, infoPort=36865, infoSecurePort=0, ipcPort=33477, storageInfo=lv=-57;cid=testClusterID;nsid=1044134892;c=1732616869231) 2024-11-26T10:27:50,041 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9201e8c772a23004 with lease ID 0x23131a60342c3aa6: from storage DS-70ab907b-7694-4394-9d34-747d8265c51b node DatanodeRegistration(127.0.0.1:39131, datanodeUuid=0af9844e-698d-4a3f-9cb8-3ea097e19dc9, infoPort=36865, infoSecurePort=0, ipcPort=33477, storageInfo=lv=-57;cid=testClusterID;nsid=1044134892;c=1732616869231), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:27:50,041 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9201e8c772a23004 with lease ID 0x23131a60342c3aa6: Processing first storage report for DS-00b340c1-3e11-4c62-a4d8-c4721a03925b from datanode DatanodeRegistration(127.0.0.1:39131, datanodeUuid=0af9844e-698d-4a3f-9cb8-3ea097e19dc9, infoPort=36865, infoSecurePort=0, ipcPort=33477, storageInfo=lv=-57;cid=testClusterID;nsid=1044134892;c=1732616869231) 2024-11-26T10:27:50,041 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9201e8c772a23004 with lease ID 0x23131a60342c3aa6: from storage DS-00b340c1-3e11-4c62-a4d8-c4721a03925b node DatanodeRegistration(127.0.0.1:39131, datanodeUuid=0af9844e-698d-4a3f-9cb8-3ea097e19dc9, infoPort=36865, infoSecurePort=0, ipcPort=33477, storageInfo=lv=-57;cid=testClusterID;nsid=1044134892;c=1732616869231), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:27:50,106 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001 2024-11-26T10:27:50,109 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/cluster_ebbb2e29-b0be-4666-8cad-1674355cfe10/zookeeper_0, clientPort=49283, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/cluster_ebbb2e29-b0be-4666-8cad-1674355cfe10/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/cluster_ebbb2e29-b0be-4666-8cad-1674355cfe10/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-26T10:27:50,110 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49283 2024-11-26T10:27:50,110 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:27:50,112 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:27:50,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39131 is added to blk_1073741825_1001 (size=7) 2024-11-26T10:27:50,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35401 is added to blk_1073741825_1001 (size=7) 2024-11-26T10:27:50,123 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6 with version=8 2024-11-26T10:27:50,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/hbase-staging 2024-11-26T10:27:50,125 INFO [Time-limited test {}] client.ConnectionUtils(128): master/94eedbb855cf:0 server-side Connection retries=45 2024-11-26T10:27:50,125 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:27:50,125 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-26T10:27:50,125 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-26T10:27:50,125 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:27:50,126 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-26T10:27:50,126 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-26T10:27:50,126 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-26T10:27:50,126 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37121 2024-11-26T10:27:50,128 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37121 connecting to ZooKeeper ensemble=127.0.0.1:49283 2024-11-26T10:27:50,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:371210x0, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-26T10:27:50,133 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37121-0x10153d1436c0000 connected 2024-11-26T10:27:50,154 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:27:50,155 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:27:50,158 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:27:50,158 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6, hbase.cluster.distributed=false 2024-11-26T10:27:50,160 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-26T10:27:50,162 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37121 2024-11-26T10:27:50,163 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37121 2024-11-26T10:27:50,165 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37121 2024-11-26T10:27:50,166 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37121 2024-11-26T10:27:50,166 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37121 2024-11-26T10:27:50,183 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/94eedbb855cf:0 server-side Connection retries=45 2024-11-26T10:27:50,183 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:27:50,183 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-26T10:27:50,183 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-26T10:27:50,183 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:27:50,183 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-26T10:27:50,183 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-26T10:27:50,183 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-26T10:27:50,184 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44389 2024-11-26T10:27:50,185 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44389 connecting to ZooKeeper ensemble=127.0.0.1:49283 2024-11-26T10:27:50,186 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:27:50,188 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:27:50,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:443890x0, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-26T10:27:50,193 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:443890x0, quorum=127.0.0.1:49283, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:27:50,193 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44389-0x10153d1436c0001 connected 2024-11-26T10:27:50,193 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-26T10:27:50,196 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-26T10:27:50,197 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-26T10:27:50,198 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-26T10:27:50,198 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44389 2024-11-26T10:27:50,200 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44389 2024-11-26T10:27:50,201 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44389 2024-11-26T10:27:50,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44389 2024-11-26T10:27:50,204 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44389 2024-11-26T10:27:50,217 
DEBUG [M:0;94eedbb855cf:37121 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;94eedbb855cf:37121 2024-11-26T10:27:50,217 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/94eedbb855cf,37121,1732616870125 2024-11-26T10:27:50,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:27:50,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:27:50,221 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/94eedbb855cf,37121,1732616870125 2024-11-26T10:27:50,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:50,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-26T10:27:50,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:50,223 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-26T10:27:50,223 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/94eedbb855cf,37121,1732616870125 from backup master directory 2024-11-26T10:27:50,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/94eedbb855cf,37121,1732616870125 2024-11-26T10:27:50,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:27:50,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:27:50,225 WARN [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
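
The block above records the mini ZooKeeper cluster answering on clientPort=49283 and the master (port 37121) and regionserver (port 44389) registering their znodes under /hbase. For orientation, a client or external tool would reach this test cluster through that quorum; the following is a minimal sketch using the standard ConnectionFactory API, with the quorum and client port taken from the log (inside the test itself one would normally just use the utility's own connection rather than building one by hand):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MiniClusterClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum and client port as reported by MiniZooKeeperCluster above.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 49283);

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Succeeds only once the active master registered in /hbase is up.
          System.out.println("Tables visible: " + admin.listTableNames().length);
        }
      }
    }
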
2024-11-26T10:27:50,225 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=94eedbb855cf,37121,1732616870125 2024-11-26T10:27:50,232 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/hbase.id] with ID: 4a2174b1-fdc2-4b20-851f-5271b07915fb 2024-11-26T10:27:50,232 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/.tmp/hbase.id 2024-11-26T10:27:50,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39131 is added to blk_1073741826_1002 (size=42) 2024-11-26T10:27:50,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35401 is added to blk_1073741826_1002 (size=42) 2024-11-26T10:27:50,241 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/.tmp/hbase.id]:[hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/hbase.id] 2024-11-26T10:27:50,255 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:27:50,256 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-26T10:27:50,257 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
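
The lines above show the cluster ID file (hbase.id) being written under the test root directory and 94eedbb855cf,37121,1732616870125 becoming the active master. Within a test these same facts are usually verified through the Admin metrics API rather than by reading the file; a hedged sketch, assuming HBaseTestingUtil exposes getConnection() on this branch as HBaseTestingUtility does:

    import java.util.EnumSet;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.ClusterMetrics.Option;
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.client.Admin;

    public class ClusterIdentityCheckSketch {
      // 'util' is the HBaseTestingUtil instance that started the minicluster above.
      static void printClusterIdentity(HBaseTestingUtil util) throws Exception {
        try (Admin admin = util.getConnection().getAdmin()) {
          ClusterMetrics metrics =
              admin.getClusterMetrics(EnumSet.of(Option.CLUSTER_ID, Option.MASTER));
          // Should match the ID written to hbase.id and the active master in the log.
          System.out.println("clusterId=" + metrics.getClusterId()
              + ", activeMaster=" + metrics.getMasterName());
        }
      }
    }
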
2024-11-26T10:27:50,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:50,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:50,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35401 is added to blk_1073741827_1003 (size=196) 2024-11-26T10:27:50,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39131 is added to blk_1073741827_1003 (size=196) 2024-11-26T10:27:50,269 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:27:50,270 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-26T10:27:50,270 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:27:50,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35401 is added to blk_1073741828_1004 (size=1189) 2024-11-26T10:27:50,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39131 is added to blk_1073741828_1004 (size=1189) 2024-11-26T10:27:50,278 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store 2024-11-26T10:27:50,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39131 is added to blk_1073741829_1005 (size=34) 2024-11-26T10:27:50,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35401 is added to blk_1073741829_1005 (size=34) 2024-11-26T10:27:50,288 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:27:50,289 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-26T10:27:50,289 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:27:50,289 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:27:50,289 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-26T10:27:50,289 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:27:50,289 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
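
The master:store descriptor logged above (families info, proc, rs and state, with ROW_INDEX_V1 encoding, a ROWCOL bloom filter and an 8 KB block size on info) is built internally by MasterRegion, but the same shape can be expressed with the public descriptor builders. The sketch below is for illustration only, using the standard TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API to show how the logged attributes map onto builder calls:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        // 'info' as logged: 3 versions, ROWCOL bloom, ROW_INDEX_V1, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();

        // 'proc', 'rs' and 'state' share the defaults shown in the log:
        // 1 version, ROW bloom, no encoding, 64 KB blocks.
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
                .setColumnFamily(info);
        for (String family : new String[] { "proc", "rs", "state" }) {
          builder.setColumnFamily(
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                  .setMaxVersions(1)
                  .setBloomFilterType(BloomType.ROW)
                  .build());
        }
        return builder.build();
      }
    }
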
2024-11-26T10:27:50,289 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732616870289Disabling compacts and flushes for region at 1732616870289Disabling writes for close at 1732616870289Writing region close event to WAL at 1732616870289Closed at 1732616870289 2024-11-26T10:27:50,290 WARN [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/.initializing 2024-11-26T10:27:50,290 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/WALs/94eedbb855cf,37121,1732616870125 2024-11-26T10:27:50,293 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C37121%2C1732616870125, suffix=, logDir=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/WALs/94eedbb855cf,37121,1732616870125, archiveDir=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/oldWALs, maxLogs=10 2024-11-26T10:27:50,294 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C37121%2C1732616870125.1732616870294 2024-11-26T10:27:50,299 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/WALs/94eedbb855cf,37121,1732616870125/94eedbb855cf%2C37121%2C1732616870125.1732616870294 2024-11-26T10:27:50,300 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36865:36865),(127.0.0.1/127.0.0.1:40851:40851)] 2024-11-26T10:27:50,304 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:27:50,304 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:27:50,304 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:50,304 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:50,307 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:50,308 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-26T10:27:50,308 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:50,309 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:27:50,309 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:50,310 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-26T10:27:50,310 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:50,311 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:27:50,311 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:50,313 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-26T10:27:50,313 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:50,314 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:27:50,314 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:50,315 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-26T10:27:50,315 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:50,316 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:27:50,316 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:50,317 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:50,317 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:50,319 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:50,319 DEBUG [master/94eedbb855cf:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:50,319 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-26T10:27:50,321 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:50,323 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:27:50,323 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=871204, jitterRate=0.10779419541358948}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-26T10:27:50,325 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732616870305Initializing all the Stores at 1732616870305Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616870306 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616870306Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616870306Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616870306Cleaning up temporary data from old regions at 1732616870319 (+13 ms)Region opened successfully at 1732616870324 (+5 ms) 2024-11-26T10:27:50,325 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-26T10:27:50,328 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5edec3b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=94eedbb855cf/172.17.0.2:0 2024-11-26T10:27:50,329 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-26T10:27:50,330 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-26T10:27:50,330 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-26T10:27:50,330 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-26T10:27:50,330 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-26T10:27:50,331 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-26T10:27:50,331 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-26T10:27:50,334 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-26T10:27:50,335 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-26T10:27:50,337 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-26T10:27:50,338 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-26T10:27:50,338 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-26T10:27:50,339 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-26T10:27:50,340 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-26T10:27:50,341 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-26T10:27:50,342 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-26T10:27:50,342 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-26T10:27:50,343 DEBUG 
[master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-26T10:27:50,346 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-26T10:27:50,348 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-26T10:27:50,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-26T10:27:50,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:50,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-26T10:27:50,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:50,350 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=94eedbb855cf,37121,1732616870125, sessionid=0x10153d1436c0000, setting cluster-up flag (Was=false) 2024-11-26T10:27:50,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:50,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:50,358 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-26T10:27:50,359 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=94eedbb855cf,37121,1732616870125 2024-11-26T10:27:50,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:50,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:50,369 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-26T10:27:50,370 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=94eedbb855cf,37121,1732616870125 2024-11-26T10:27:50,371 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-26T10:27:50,373 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-26T10:27:50,373 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-26T10:27:50,373 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-26T10:27:50,374 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 94eedbb855cf,37121,1732616870125 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-26T10:27:50,375 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:27:50,375 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:27:50,375 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:27:50,375 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:27:50,375 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/94eedbb855cf:0, corePoolSize=10, maxPoolSize=10 2024-11-26T10:27:50,375 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:50,375 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/94eedbb855cf:0, corePoolSize=2, maxPoolSize=2 2024-11-26T10:27:50,375 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/94eedbb855cf:0, corePoolSize=1, 
maxPoolSize=1 2024-11-26T10:27:50,377 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:27:50,377 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-26T10:27:50,378 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732616900378 2024-11-26T10:27:50,378 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-26T10:27:50,378 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-26T10:27:50,378 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-26T10:27:50,378 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-26T10:27:50,378 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-26T10:27:50,378 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-26T10:27:50,378 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-26T10:27:50,379 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:50,379 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-26T10:27:50,379 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-26T10:27:50,379 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-26T10:27:50,379 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-26T10:27:50,379 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-26T10:27:50,379 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-26T10:27:50,380 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732616870379,5,FailOnTimeoutGroup] 2024-11-26T10:27:50,380 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732616870380,5,FailOnTimeoutGroup] 2024-11-26T10:27:50,380 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:50,380 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-26T10:27:50,380 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:50,380 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:50,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39131 is added to blk_1073741831_1007 (size=1321) 2024-11-26T10:27:50,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35401 is added to blk_1073741831_1007 (size=1321) 2024-11-26T10:27:50,390 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-26T10:27:50,390 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6 2024-11-26T10:27:50,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39131 is added to blk_1073741832_1008 (size=32) 2024-11-26T10:27:50,399 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:27:50,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35401 is added to blk_1073741832_1008 (size=32) 2024-11-26T10:27:50,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-26T10:27:50,401 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-26T10:27:50,401 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:50,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:27:50,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-26T10:27:50,404 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-26T10:27:50,404 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:50,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:27:50,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-26T10:27:50,406 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-26T10:27:50,407 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:50,407 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.HRegionServer(746): ClusterId : 4a2174b1-fdc2-4b20-851f-5271b07915fb 2024-11-26T10:27:50,407 DEBUG [RS:0;94eedbb855cf:44389 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-26T10:27:50,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:27:50,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-26T10:27:50,411 DEBUG [RS:0;94eedbb855cf:44389 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-26T10:27:50,411 DEBUG [RS:0;94eedbb855cf:44389 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-26T10:27:50,413 DEBUG [RS:0;94eedbb855cf:44389 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-26T10:27:50,414 DEBUG [RS:0;94eedbb855cf:44389 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dc9b246, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=94eedbb855cf/172.17.0.2:0 2024-11-26T10:27:50,419 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-26T10:27:50,419 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:50,420 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:27:50,420 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-26T10:27:50,421 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/data/hbase/meta/1588230740 2024-11-26T10:27:50,421 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/data/hbase/meta/1588230740 2024-11-26T10:27:50,423 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-26T10:27:50,423 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-26T10:27:50,424 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-26T10:27:50,426 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-26T10:27:50,430 DEBUG [RS:0;94eedbb855cf:44389 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;94eedbb855cf:44389 2024-11-26T10:27:50,430 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-26T10:27:50,430 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-26T10:27:50,430 DEBUG [RS:0;94eedbb855cf:44389 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-26T10:27:50,431 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.HRegionServer(2659): reportForDuty to master=94eedbb855cf,37121,1732616870125 with port=44389, startcode=1732616870182 2024-11-26T10:27:50,432 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:27:50,432 DEBUG [RS:0;94eedbb855cf:44389 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-26T10:27:50,432 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=852802, jitterRate=0.08439435064792633}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:27:50,434 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732616870399Initializing all the Stores at 1732616870399Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616870399Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616870400 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616870400Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616870400Cleaning up temporary data from old regions at 1732616870423 (+23 ms)Region opened successfully at 1732616870434 (+11 ms) 2024-11-26T10:27:50,434 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-26T10:27:50,435 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-26T10:27:50,435 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-26T10:27:50,435 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-26T10:27:50,435 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-26T10:27:50,436 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35541, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-26T10:27:50,437 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-26T10:27:50,437 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732616870434Disabling compacts and flushes for region at 1732616870434Disabling writes for close at 1732616870435 (+1 ms)Writing region close event to WAL at 1732616870436 (+1 ms)Closed at 1732616870436 2024-11-26T10:27:50,437 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37121 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 94eedbb855cf,44389,1732616870182 2024-11-26T10:27:50,437 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37121 {}] master.ServerManager(517): Registering regionserver=94eedbb855cf,44389,1732616870182 2024-11-26T10:27:50,439 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:27:50,439 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-26T10:27:50,440 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-26T10:27:50,440 DEBUG [RS:0;94eedbb855cf:44389 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6 2024-11-26T10:27:50,440 DEBUG [RS:0;94eedbb855cf:44389 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46211 2024-11-26T10:27:50,440 DEBUG [RS:0;94eedbb855cf:44389 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-26T10:27:50,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-26T10:27:50,443 DEBUG [RS:0;94eedbb855cf:44389 {}] zookeeper.ZKUtil(111): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/94eedbb855cf,44389,1732616870182 2024-11-26T10:27:50,443 WARN [RS:0;94eedbb855cf:44389 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-26T10:27:50,443 INFO [RS:0;94eedbb855cf:44389 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:27:50,443 DEBUG [RS:0;94eedbb855cf:44389 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/WALs/94eedbb855cf,44389,1732616870182 2024-11-26T10:27:50,444 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [94eedbb855cf,44389,1732616870182] 2024-11-26T10:27:50,444 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-26T10:27:50,445 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-26T10:27:50,453 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-26T10:27:50,456 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-26T10:27:50,457 INFO [RS:0;94eedbb855cf:44389 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-26T10:27:50,457 INFO [RS:0;94eedbb855cf:44389 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:50,458 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-26T10:27:50,459 INFO [RS:0;94eedbb855cf:44389 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-26T10:27:50,459 INFO [RS:0;94eedbb855cf:44389 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-26T10:27:50,459 DEBUG [RS:0;94eedbb855cf:44389 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:50,459 DEBUG [RS:0;94eedbb855cf:44389 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:50,459 DEBUG [RS:0;94eedbb855cf:44389 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:50,459 DEBUG [RS:0;94eedbb855cf:44389 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:50,460 DEBUG [RS:0;94eedbb855cf:44389 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:50,460 DEBUG [RS:0;94eedbb855cf:44389 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/94eedbb855cf:0, corePoolSize=2, maxPoolSize=2 2024-11-26T10:27:50,460 DEBUG [RS:0;94eedbb855cf:44389 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:50,460 DEBUG [RS:0;94eedbb855cf:44389 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:50,460 DEBUG [RS:0;94eedbb855cf:44389 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:50,460 DEBUG [RS:0;94eedbb855cf:44389 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:50,460 DEBUG [RS:0;94eedbb855cf:44389 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:50,460 DEBUG [RS:0;94eedbb855cf:44389 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:50,460 DEBUG [RS:0;94eedbb855cf:44389 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/94eedbb855cf:0, corePoolSize=3, maxPoolSize=3 2024-11-26T10:27:50,460 DEBUG [RS:0;94eedbb855cf:44389 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0, corePoolSize=3, maxPoolSize=3 2024-11-26T10:27:50,468 INFO [RS:0;94eedbb855cf:44389 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:50,468 INFO [RS:0;94eedbb855cf:44389 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:50,469 INFO [RS:0;94eedbb855cf:44389 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:50,469 INFO [RS:0;94eedbb855cf:44389 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-26T10:27:50,469 INFO [RS:0;94eedbb855cf:44389 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:50,469 INFO [RS:0;94eedbb855cf:44389 {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,44389,1732616870182-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-26T10:27:50,489 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-26T10:27:50,489 INFO [RS:0;94eedbb855cf:44389 {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,44389,1732616870182-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:50,489 INFO [RS:0;94eedbb855cf:44389 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:50,490 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.Replication(171): 94eedbb855cf,44389,1732616870182 started 2024-11-26T10:27:50,505 INFO [RS:0;94eedbb855cf:44389 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:50,506 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.HRegionServer(1482): Serving as 94eedbb855cf,44389,1732616870182, RpcServer on 94eedbb855cf/172.17.0.2:44389, sessionid=0x10153d1436c0001 2024-11-26T10:27:50,506 DEBUG [RS:0;94eedbb855cf:44389 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-26T10:27:50,506 DEBUG [RS:0;94eedbb855cf:44389 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 94eedbb855cf,44389,1732616870182 2024-11-26T10:27:50,506 DEBUG [RS:0;94eedbb855cf:44389 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '94eedbb855cf,44389,1732616870182' 2024-11-26T10:27:50,506 DEBUG [RS:0;94eedbb855cf:44389 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-26T10:27:50,507 DEBUG [RS:0;94eedbb855cf:44389 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-26T10:27:50,507 DEBUG [RS:0;94eedbb855cf:44389 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-26T10:27:50,508 DEBUG [RS:0;94eedbb855cf:44389 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-26T10:27:50,508 DEBUG [RS:0;94eedbb855cf:44389 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 94eedbb855cf,44389,1732616870182 2024-11-26T10:27:50,508 DEBUG [RS:0;94eedbb855cf:44389 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '94eedbb855cf,44389,1732616870182' 2024-11-26T10:27:50,508 DEBUG [RS:0;94eedbb855cf:44389 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-26T10:27:50,508 DEBUG [RS:0;94eedbb855cf:44389 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-26T10:27:50,509 DEBUG [RS:0;94eedbb855cf:44389 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-26T10:27:50,509 INFO [RS:0;94eedbb855cf:44389 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-26T10:27:50,509 INFO [RS:0;94eedbb855cf:44389 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-26T10:27:50,595 WARN [94eedbb855cf:37121 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-26T10:27:50,611 INFO [RS:0;94eedbb855cf:44389 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C44389%2C1732616870182, suffix=, logDir=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/WALs/94eedbb855cf,44389,1732616870182, archiveDir=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/oldWALs, maxLogs=32 2024-11-26T10:27:50,613 INFO [RS:0;94eedbb855cf:44389 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C44389%2C1732616870182.1732616870613 2024-11-26T10:27:50,620 INFO [RS:0;94eedbb855cf:44389 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/WALs/94eedbb855cf,44389,1732616870182/94eedbb855cf%2C44389%2C1732616870182.1732616870613 2024-11-26T10:27:50,621 DEBUG [RS:0;94eedbb855cf:44389 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40851:40851),(127.0.0.1/127.0.0.1:36865:36865)] 2024-11-26T10:27:50,846 DEBUG [94eedbb855cf:37121 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-26T10:27:50,846 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=94eedbb855cf,44389,1732616870182 2024-11-26T10:27:50,848 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 94eedbb855cf,44389,1732616870182, state=OPENING 2024-11-26T10:27:50,850 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-26T10:27:50,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:50,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:50,853 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:27:50,853 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-26T10:27:50,853 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:27:50,853 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=94eedbb855cf,44389,1732616870182}] 2024-11-26T10:27:51,007 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-26T10:27:51,010 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50199, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-26T10:27:51,015 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-26T10:27:51,015 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:27:51,017 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C44389%2C1732616870182.meta, suffix=.meta, logDir=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/WALs/94eedbb855cf,44389,1732616870182, archiveDir=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/oldWALs, maxLogs=32 2024-11-26T10:27:51,019 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C44389%2C1732616870182.meta.1732616871019.meta 2024-11-26T10:27:51,025 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/WALs/94eedbb855cf,44389,1732616870182/94eedbb855cf%2C44389%2C1732616870182.meta.1732616871019.meta 2024-11-26T10:27:51,026 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40851:40851),(127.0.0.1/127.0.0.1:36865:36865)] 2024-11-26T10:27:51,027 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:27:51,027 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-26T10:27:51,027 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-26T10:27:51,028 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-26T10:27:51,028 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-26T10:27:51,028 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:27:51,028 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-26T10:27:51,028 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-26T10:27:51,030 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-26T10:27:51,031 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-26T10:27:51,032 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:51,032 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:27:51,033 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-26T10:27:51,033 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-26T10:27:51,034 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:51,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:27:51,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-26T10:27:51,035 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-26T10:27:51,035 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:51,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:27:51,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-26T10:27:51,037 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-26T10:27:51,037 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:51,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-26T10:27:51,038 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-26T10:27:51,038 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/data/hbase/meta/1588230740 2024-11-26T10:27:51,040 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/data/hbase/meta/1588230740 2024-11-26T10:27:51,041 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-26T10:27:51,042 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-26T10:27:51,042 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-26T10:27:51,044 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-26T10:27:51,044 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=869487, jitterRate=0.10561089217662811}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:27:51,045 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-26T10:27:51,046 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732616871028Writing region info on filesystem at 1732616871028Initializing all the Stores at 1732616871029 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616871029Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616871030 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616871030Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616871030Cleaning up temporary data from old regions at 1732616871042 (+12 ms)Running coprocessor post-open hooks at 1732616871045 (+3 ms)Region opened successfully at 1732616871046 (+1 ms) 2024-11-26T10:27:51,047 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732616871007 2024-11-26T10:27:51,050 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-26T10:27:51,050 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-26T10:27:51,051 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=94eedbb855cf,44389,1732616870182 2024-11-26T10:27:51,052 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 94eedbb855cf,44389,1732616870182, state=OPEN 2024-11-26T10:27:51,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-26T10:27:51,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-26T10:27:51,057 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:27:51,057 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:27:51,057 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=94eedbb855cf,44389,1732616870182 2024-11-26T10:27:51,060 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-26T10:27:51,060 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=94eedbb855cf,44389,1732616870182 in 204 msec 2024-11-26T10:27:51,064 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-26T10:27:51,064 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 620 msec 2024-11-26T10:27:51,065 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:27:51,065 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-26T10:27:51,067 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-26T10:27:51,067 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=94eedbb855cf,44389,1732616870182, seqNum=-1] 2024-11-26T10:27:51,067 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:27:51,069 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51909, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:27:51,076 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 703 msec 2024-11-26T10:27:51,076 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732616871076, completionTime=-1 2024-11-26T10:27:51,076 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-26T10:27:51,076 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-26T10:27:51,078 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-26T10:27:51,078 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732616931078 2024-11-26T10:27:51,078 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732616991078 2024-11-26T10:27:51,079 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-26T10:27:51,079 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,37121,1732616870125-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:51,079 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,37121,1732616870125-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:51,079 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,37121,1732616870125-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:51,079 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-94eedbb855cf:37121, period=300000, unit=MILLISECONDS is enabled. 
2024-11-26T10:27:51,079 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:51,080 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:51,082 DEBUG [master/94eedbb855cf:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-26T10:27:51,085 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.860sec 2024-11-26T10:27:51,085 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-26T10:27:51,085 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-26T10:27:51,085 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-26T10:27:51,085 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-26T10:27:51,085 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-26T10:27:51,085 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,37121,1732616870125-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-26T10:27:51,085 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,37121,1732616870125-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-26T10:27:51,088 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-26T10:27:51,088 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-26T10:27:51,088 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,37121,1732616870125-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-26T10:27:51,107 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bf88cd2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:27:51,107 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 94eedbb855cf,37121,-1 for getting cluster id 2024-11-26T10:27:51,107 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-26T10:27:51,109 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4a2174b1-fdc2-4b20-851f-5271b07915fb' 2024-11-26T10:27:51,110 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-26T10:27:51,110 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4a2174b1-fdc2-4b20-851f-5271b07915fb" 2024-11-26T10:27:51,111 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67dab074, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:27:51,111 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [94eedbb855cf,37121,-1] 2024-11-26T10:27:51,111 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-26T10:27:51,112 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:27:51,114 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49638, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-26T10:27:51,115 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ffb657f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:27:51,115 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-26T10:27:51,116 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=94eedbb855cf,44389,1732616870182, seqNum=-1] 2024-11-26T10:27:51,117 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:27:51,119 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39664, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:27:51,121 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=94eedbb855cf,37121,1732616870125 2024-11-26T10:27:51,122 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:27:51,125 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-26T10:27:51,125 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-26T10:27:51,125 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-26T10:27:51,125 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:27:51,126 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:27:51,126 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:27:51,126 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-26T10:27:51,126 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-26T10:27:51,126 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1420279054, stopped=false 2024-11-26T10:27:51,126 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=94eedbb855cf,37121,1732616870125 2024-11-26T10:27:51,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-26T10:27:51,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-26T10:27:51,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:51,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:51,128 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-26T10:27:51,129 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
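The call stack above records the teardown path for this test: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which first closes the shared async connection and then stops the HBase minicluster, the mini-DFS and the mini-ZooKeeper quorum. A minimal sketch of that teardown hook follows; the static TEST_UTIL field name is an assumption for illustration, only the shutdownMiniCluster call itself appears in the stack trace.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class LogRollingTeardownSketch {
      // Shared test fixture; the real test keeps an equivalent utility instance
      // (field name assumed here).
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Closes the cluster connection, then shuts down the minicluster
        // (master + region servers), the mini-DFS and the mini-ZK quorum,
        // producing the shutdown sequence logged below.
        TEST_UTIL.shutdownMiniCluster();
      }
    }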
2024-11-26T10:27:51,129 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:27:51,129 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:27:51,129 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:27:51,129 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:27:51,129 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.HRegionServer(878): Closing user regions 2024-11-26T10:27:51,129 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '94eedbb855cf,44389,1732616870182' ***** 2024-11-26T10:27:51,129 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-26T10:27:51,131 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-26T10:27:51,131 INFO [RS:0;94eedbb855cf:44389 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-26T10:27:51,131 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-26T10:27:51,131 INFO [RS:0;94eedbb855cf:44389 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-26T10:27:51,131 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.HRegionServer(959): stopping server 94eedbb855cf,44389,1732616870182 2024-11-26T10:27:51,131 INFO [RS:0;94eedbb855cf:44389 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-26T10:27:51,131 INFO [RS:0;94eedbb855cf:44389 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;94eedbb855cf:44389. 
2024-11-26T10:27:51,131 DEBUG [RS:0;94eedbb855cf:44389 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:27:51,131 DEBUG [RS:0;94eedbb855cf:44389 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:27:51,131 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-26T10:27:51,131 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-26T10:27:51,131 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-26T10:27:51,131 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-26T10:27:51,132 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-26T10:27:51,132 DEBUG [RS:0;94eedbb855cf:44389 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-26T10:27:51,132 DEBUG [RS:0;94eedbb855cf:44389 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-26T10:27:51,132 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-26T10:27:51,132 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-26T10:27:51,132 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-26T10:27:51,132 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-26T10:27:51,132 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-26T10:27:51,132 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-26T10:27:51,152 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/data/hbase/meta/1588230740/.tmp/ns/668cccbbdb28468c99bfe08450bb4b97 is 43, key is default/ns:d/1732616871069/Put/seqid=0 2024-11-26T10:27:51,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35401 is added to blk_1073741835_1011 (size=5153) 2024-11-26T10:27:51,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39131 is added to blk_1073741835_1011 (size=5153) 2024-11-26T10:27:51,159 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/data/hbase/meta/1588230740/.tmp/ns/668cccbbdb28468c99bfe08450bb4b97 2024-11-26T10:27:51,167 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/data/hbase/meta/1588230740/.tmp/ns/668cccbbdb28468c99bfe08450bb4b97 as hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/data/hbase/meta/1588230740/ns/668cccbbdb28468c99bfe08450bb4b97 2024-11-26T10:27:51,174 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/data/hbase/meta/1588230740/ns/668cccbbdb28468c99bfe08450bb4b97, entries=2, sequenceid=6, filesize=5.0 K 2024-11-26T10:27:51,175 INFO 
[RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 43ms, sequenceid=6, compaction requested=false 2024-11-26T10:27:51,175 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-26T10:27:51,181 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-26T10:27:51,182 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-26T10:27:51,182 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-26T10:27:51,182 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732616871132Running coprocessor pre-close hooks at 1732616871132Disabling compacts and flushes for region at 1732616871132Disabling writes for close at 1732616871132Obtaining lock to block concurrent updates at 1732616871132Preparing flush snapshotting stores in 1588230740 at 1732616871132Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732616871133 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732616871134 (+1 ms)Flushing 1588230740/ns: creating writer at 1732616871134Flushing 1588230740/ns: appending metadata at 1732616871151 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732616871151Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@13a5640b: reopening flushed file at 1732616871166 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 43ms, sequenceid=6, compaction requested=false at 1732616871175 (+9 ms)Writing region close event to WAL at 1732616871177 (+2 ms)Running coprocessor post-close hooks at 1732616871182 (+5 ms)Closed at 1732616871182 2024-11-26T10:27:51,183 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-26T10:27:51,332 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.HRegionServer(976): stopping server 94eedbb855cf,44389,1732616870182; all regions closed. 
2024-11-26T10:27:51,333 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:51,333 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:51,333 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:51,333 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:51,333 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:51,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39131 is added to blk_1073741834_1010 (size=1152) 2024-11-26T10:27:51,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35401 is added to blk_1073741834_1010 (size=1152) 2024-11-26T10:27:51,339 DEBUG [RS:0;94eedbb855cf:44389 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/oldWALs 2024-11-26T10:27:51,339 INFO [RS:0;94eedbb855cf:44389 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 94eedbb855cf%2C44389%2C1732616870182.meta:.meta(num 1732616871019) 2024-11-26T10:27:51,339 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:51,340 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:51,340 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:51,340 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:51,340 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:51,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39131 is added to blk_1073741833_1009 (size=93) 2024-11-26T10:27:51,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35401 is added to blk_1073741833_1009 (size=93) 2024-11-26T10:27:51,345 DEBUG [RS:0;94eedbb855cf:44389 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/oldWALs 2024-11-26T10:27:51,345 INFO [RS:0;94eedbb855cf:44389 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 94eedbb855cf%2C44389%2C1732616870182:(num 1732616870613) 2024-11-26T10:27:51,345 DEBUG [RS:0;94eedbb855cf:44389 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:27:51,345 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.LeaseManager(133): Closed leases 2024-11-26T10:27:51,345 INFO [RS:0;94eedbb855cf:44389 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-26T10:27:51,345 INFO [RS:0;94eedbb855cf:44389 {}] hbase.ChoreService(370): Chore service for: regionserver/94eedbb855cf:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-26T10:27:51,345 INFO [RS:0;94eedbb855cf:44389 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-26T10:27:51,345 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-26T10:27:51,345 INFO [RS:0;94eedbb855cf:44389 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44389 2024-11-26T10:27:51,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/94eedbb855cf,44389,1732616870182 2024-11-26T10:27:51,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-26T10:27:51,349 INFO [RS:0;94eedbb855cf:44389 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-26T10:27:51,350 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [94eedbb855cf,44389,1732616870182] 2024-11-26T10:27:51,351 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/94eedbb855cf,44389,1732616870182 already deleted, retry=false 2024-11-26T10:27:51,352 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 94eedbb855cf,44389,1732616870182 expired; onlineServers=0 2024-11-26T10:27:51,352 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '94eedbb855cf,37121,1732616870125' ***** 2024-11-26T10:27:51,352 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-26T10:27:51,352 INFO [M:0;94eedbb855cf:37121 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-26T10:27:51,352 INFO [M:0;94eedbb855cf:37121 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-26T10:27:51,352 DEBUG [M:0;94eedbb855cf:37121 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-26T10:27:51,352 DEBUG [M:0;94eedbb855cf:37121 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-26T10:27:51,352 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-26T10:27:51,352 DEBUG [master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732616870379 {}] cleaner.HFileCleaner(306): Exit Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732616870379,5,FailOnTimeoutGroup] 2024-11-26T10:27:51,352 INFO [M:0;94eedbb855cf:37121 {}] hbase.ChoreService(370): Chore service for: master/94eedbb855cf:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-26T10:27:51,352 INFO [M:0;94eedbb855cf:37121 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-26T10:27:51,352 DEBUG [master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732616870380 {}] cleaner.HFileCleaner(306): Exit Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732616870380,5,FailOnTimeoutGroup] 2024-11-26T10:27:51,352 DEBUG [M:0;94eedbb855cf:37121 {}] master.HMaster(1795): Stopping service threads 2024-11-26T10:27:51,352 INFO [M:0;94eedbb855cf:37121 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-26T10:27:51,353 INFO [M:0;94eedbb855cf:37121 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-26T10:27:51,353 INFO [M:0;94eedbb855cf:37121 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-26T10:27:51,353 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-26T10:27:51,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-26T10:27:51,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:51,354 DEBUG [M:0;94eedbb855cf:37121 {}] zookeeper.ZKUtil(347): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-26T10:27:51,354 WARN [M:0;94eedbb855cf:37121 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-26T10:27:51,355 INFO [M:0;94eedbb855cf:37121 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/.lastflushedseqids 2024-11-26T10:27:51,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39131 is added to blk_1073741836_1012 (size=99) 2024-11-26T10:27:51,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35401 is added to blk_1073741836_1012 (size=99) 2024-11-26T10:27:51,366 INFO [M:0;94eedbb855cf:37121 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-26T10:27:51,367 INFO [M:0;94eedbb855cf:37121 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-26T10:27:51,367 DEBUG [M:0;94eedbb855cf:37121 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-26T10:27:51,367 INFO [M:0;94eedbb855cf:37121 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:27:51,367 DEBUG [M:0;94eedbb855cf:37121 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:27:51,367 DEBUG [M:0;94eedbb855cf:37121 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-26T10:27:51,367 DEBUG [M:0;94eedbb855cf:37121 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:27:51,367 INFO [M:0;94eedbb855cf:37121 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-26T10:27:51,388 DEBUG [M:0;94eedbb855cf:37121 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/651ba9bd9f1147e4b0b48d7e976415a8 is 82, key is hbase:meta,,1/info:regioninfo/1732616871051/Put/seqid=0 2024-11-26T10:27:51,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35401 is added to blk_1073741837_1013 (size=5672) 2024-11-26T10:27:51,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39131 is added to blk_1073741837_1013 (size=5672) 2024-11-26T10:27:51,395 INFO [M:0;94eedbb855cf:37121 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/651ba9bd9f1147e4b0b48d7e976415a8 2024-11-26T10:27:51,426 DEBUG [M:0;94eedbb855cf:37121 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c45aa9700b4544caaf1fd8395e6729b5 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732616871075/Put/seqid=0 2024-11-26T10:27:51,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39131 is added to blk_1073741838_1014 (size=5275) 2024-11-26T10:27:51,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35401 is added to blk_1073741838_1014 (size=5275) 2024-11-26T10:27:51,435 INFO [M:0;94eedbb855cf:37121 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c45aa9700b4544caaf1fd8395e6729b5 2024-11-26T10:27:51,450 INFO [RS:0;94eedbb855cf:44389 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-26T10:27:51,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:27:51,451 INFO [RS:0;94eedbb855cf:44389 {}] regionserver.HRegionServer(1031): Exiting; stopping=94eedbb855cf,44389,1732616870182; zookeeper connection closed. 
2024-11-26T10:27:51,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44389-0x10153d1436c0001, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:27:51,451 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4fd20ce8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4fd20ce8 2024-11-26T10:27:51,451 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-26T10:27:51,460 DEBUG [M:0;94eedbb855cf:37121 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dd3e547a8e9d4e16b3505ac6179aac48 is 69, key is 94eedbb855cf,44389,1732616870182/rs:state/1732616870437/Put/seqid=0 2024-11-26T10:27:51,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35401 is added to blk_1073741839_1015 (size=5156) 2024-11-26T10:27:51,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39131 is added to blk_1073741839_1015 (size=5156) 2024-11-26T10:27:51,475 INFO [M:0;94eedbb855cf:37121 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dd3e547a8e9d4e16b3505ac6179aac48 2024-11-26T10:27:51,507 DEBUG [M:0;94eedbb855cf:37121 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/098ac1a282144af1bb1ec97765ca1f93 is 52, key is load_balancer_on/state:d/1732616871123/Put/seqid=0 2024-11-26T10:27:51,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39131 is added to blk_1073741840_1016 (size=5056) 2024-11-26T10:27:51,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35401 is added to blk_1073741840_1016 (size=5056) 2024-11-26T10:27:51,518 INFO [M:0;94eedbb855cf:37121 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/098ac1a282144af1bb1ec97765ca1f93 2024-11-26T10:27:51,525 DEBUG [M:0;94eedbb855cf:37121 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/651ba9bd9f1147e4b0b48d7e976415a8 as hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/651ba9bd9f1147e4b0b48d7e976415a8 2024-11-26T10:27:51,532 INFO [M:0;94eedbb855cf:37121 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/651ba9bd9f1147e4b0b48d7e976415a8, entries=8, sequenceid=29, 
filesize=5.5 K 2024-11-26T10:27:51,533 DEBUG [M:0;94eedbb855cf:37121 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c45aa9700b4544caaf1fd8395e6729b5 as hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c45aa9700b4544caaf1fd8395e6729b5 2024-11-26T10:27:51,540 INFO [M:0;94eedbb855cf:37121 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c45aa9700b4544caaf1fd8395e6729b5, entries=3, sequenceid=29, filesize=5.2 K 2024-11-26T10:27:51,541 DEBUG [M:0;94eedbb855cf:37121 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dd3e547a8e9d4e16b3505ac6179aac48 as hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dd3e547a8e9d4e16b3505ac6179aac48 2024-11-26T10:27:51,549 INFO [M:0;94eedbb855cf:37121 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dd3e547a8e9d4e16b3505ac6179aac48, entries=1, sequenceid=29, filesize=5.0 K 2024-11-26T10:27:51,551 DEBUG [M:0;94eedbb855cf:37121 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/098ac1a282144af1bb1ec97765ca1f93 as hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/098ac1a282144af1bb1ec97765ca1f93 2024-11-26T10:27:51,558 INFO [M:0;94eedbb855cf:37121 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46211/user/jenkins/test-data/9a8db915-d53c-dbf5-5202-2d531fe071b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/098ac1a282144af1bb1ec97765ca1f93, entries=1, sequenceid=29, filesize=4.9 K 2024-11-26T10:27:51,559 INFO [M:0;94eedbb855cf:37121 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 192ms, sequenceid=29, compaction requested=false 2024-11-26T10:27:51,568 INFO [M:0;94eedbb855cf:37121 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-26T10:27:51,568 DEBUG [M:0;94eedbb855cf:37121 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732616871367Disabling compacts and flushes for region at 1732616871367Disabling writes for close at 1732616871367Obtaining lock to block concurrent updates at 1732616871367Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732616871367Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732616871368 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732616871369 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732616871369Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732616871388 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732616871388Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732616871402 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732616871426 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732616871426Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732616871442 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732616871459 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732616871459Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732616871484 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732616871507 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732616871507Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1eb5869c: reopening flushed file at 1732616871524 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f5966ad: reopening flushed file at 1732616871532 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@44463b2f: reopening flushed file at 1732616871540 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6088a5b6: reopening flushed file at 1732616871549 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 192ms, sequenceid=29, compaction requested=false at 1732616871559 (+10 ms)Writing region close event to WAL at 1732616871568 (+9 ms)Closed at 1732616871568 2024-11-26T10:27:51,569 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:51,569 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:51,569 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:51,569 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:51,569 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:27:51,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35401 is added to blk_1073741830_1006 (size=10311) 2024-11-26T10:27:51,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39131 is added to blk_1073741830_1006 (size=10311) 2024-11-26T10:27:51,572 INFO [M:0;94eedbb855cf:37121 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-26T10:27:51,573 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-26T10:27:51,573 INFO [M:0;94eedbb855cf:37121 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37121 2024-11-26T10:27:51,573 INFO [M:0;94eedbb855cf:37121 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-26T10:27:51,676 INFO [M:0;94eedbb855cf:37121 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-26T10:27:51,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:27:51,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37121-0x10153d1436c0000, quorum=127.0.0.1:49283, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:27:51,679 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7cebd4b6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:27:51,680 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e6ff2f3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:27:51,680 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:27:51,680 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18f27499{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:27:51,680 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d944f53{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/hadoop.log.dir/,STOPPED} 2024-11-26T10:27:51,682 WARN [BP-1530044533-172.17.0.2-1732616869231 heartbeating to localhost/127.0.0.1:46211 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:27:51,682 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-26T10:27:51,682 WARN [BP-1530044533-172.17.0.2-1732616869231 heartbeating to localhost/127.0.0.1:46211 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1530044533-172.17.0.2-1732616869231 (Datanode Uuid 0af9844e-698d-4a3f-9cb8-3ea097e19dc9) service to localhost/127.0.0.1:46211 2024-11-26T10:27:51,682 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:27:51,683 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/cluster_ebbb2e29-b0be-4666-8cad-1674355cfe10/data/data3/current/BP-1530044533-172.17.0.2-1732616869231 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:27:51,683 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/cluster_ebbb2e29-b0be-4666-8cad-1674355cfe10/data/data4/current/BP-1530044533-172.17.0.2-1732616869231 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:27:51,683 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:27:51,689 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@61e52b83{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:27:51,690 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@29a18ee0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:27:51,690 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:27:51,690 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@670e4080{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:27:51,690 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@607b9bc6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/hadoop.log.dir/,STOPPED} 2024-11-26T10:27:51,692 WARN [BP-1530044533-172.17.0.2-1732616869231 heartbeating to localhost/127.0.0.1:46211 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:27:51,692 WARN [BP-1530044533-172.17.0.2-1732616869231 heartbeating to localhost/127.0.0.1:46211 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1530044533-172.17.0.2-1732616869231 (Datanode Uuid bd94ebe6-b444-49e1-ae1c-348940b8f28d) service to localhost/127.0.0.1:46211 2024-11-26T10:27:51,693 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/cluster_ebbb2e29-b0be-4666-8cad-1674355cfe10/data/data1/current/BP-1530044533-172.17.0.2-1732616869231 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 
2024-11-26T10:27:51,693 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/cluster_ebbb2e29-b0be-4666-8cad-1674355cfe10/data/data2/current/BP-1530044533-172.17.0.2-1732616869231 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:27:51,693 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-26T10:27:51,693 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:27:51,693 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:27:51,699 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3235d5ba{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-26T10:27:51,700 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@347af0d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:27:51,700 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:27:51,700 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a69944b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:27:51,700 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a3c3ceb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/hadoop.log.dir/,STOPPED} 2024-11-26T10:27:51,707 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-26T10:27:51,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-26T10:27:51,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-26T10:27:51,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/hadoop.log.dir so I do NOT create it in target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a 2024-11-26T10:27:51,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5400067-2b0f-4b8a-42be-7c4944ae0001/hadoop.tmp.dir so I do NOT create it in target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a 2024-11-26T10:27:51,731 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313, deleteOnExit=true 2024-11-26T10:27:51,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-26T10:27:51,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/test.cache.data in system properties and HBase conf 2024-11-26T10:27:51,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/hadoop.tmp.dir in system properties and HBase conf 2024-11-26T10:27:51,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/hadoop.log.dir in system properties and HBase conf 2024-11-26T10:27:51,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-26T10:27:51,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-26T10:27:51,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-26T10:27:51,732 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-26T10:27:51,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-26T10:27:51,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-26T10:27:51,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-26T10:27:51,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-26T10:27:51,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-26T10:27:51,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-26T10:27:51,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-26T10:27:51,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-26T10:27:51,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-26T10:27:51,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/nfs.dump.dir in system properties and HBase conf 2024-11-26T10:27:51,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/java.io.tmpdir in system properties and HBase conf 2024-11-26T10:27:51,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-26T10:27:51,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-26T10:27:51,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-26T10:27:51,753 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-26T10:27:51,829 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:27:51,836 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:27:51,838 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:27:51,838 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:27:51,838 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-26T10:27:51,841 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:27:51,841 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e7025d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:27:51,842 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17c48ca{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:27:51,961 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a640c70{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/java.io.tmpdir/jetty-localhost-34577-hadoop-hdfs-3_4_1-tests_jar-_-any-3613883253876685223/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-26T10:27:51,962 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@26313eab{HTTP/1.1, (http/1.1)}{localhost:34577} 2024-11-26T10:27:51,962 INFO [Time-limited test {}] server.Server(415): Started @104110ms 2024-11-26T10:27:51,978 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-26T10:27:52,046 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:27:52,050 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:27:52,050 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:27:52,050 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:27:52,050 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-26T10:27:52,051 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bb4f47b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:27:52,051 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c3d2a60{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:27:52,166 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@475f8022{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/java.io.tmpdir/jetty-localhost-45287-hadoop-hdfs-3_4_1-tests_jar-_-any-7735734686994635189/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:27:52,167 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f8b6f27{HTTP/1.1, (http/1.1)}{localhost:45287} 2024-11-26T10:27:52,167 INFO [Time-limited test {}] server.Server(415): Started @104314ms 2024-11-26T10:27:52,169 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-26T10:27:52,202 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:27:52,206 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:27:52,207 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:27:52,207 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:27:52,207 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-26T10:27:52,208 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39a69c39{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:27:52,208 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@198c3788{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:27:52,292 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data1/current/BP-482080985-172.17.0.2-1732616871772/current, will proceed with Du for space computation calculation, 2024-11-26T10:27:52,292 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data2/current/BP-482080985-172.17.0.2-1732616871772/current, will proceed with Du for space computation calculation, 2024-11-26T10:27:52,315 WARN [Thread-635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-26T10:27:52,319 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfd8a1c350ad992b3 with lease ID 0xc3f1d4fea88d0428: Processing first storage report for DS-51c1a3b7-694b-4b53-9210-26d4deba4708 from datanode DatanodeRegistration(127.0.0.1:44797, datanodeUuid=d8f6f3df-6727-4385-80f7-bf951ac2cfd8, infoPort=43123, infoSecurePort=0, ipcPort=44319, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772) 2024-11-26T10:27:52,319 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfd8a1c350ad992b3 with lease ID 0xc3f1d4fea88d0428: from storage DS-51c1a3b7-694b-4b53-9210-26d4deba4708 node DatanodeRegistration(127.0.0.1:44797, datanodeUuid=d8f6f3df-6727-4385-80f7-bf951ac2cfd8, infoPort=43123, infoSecurePort=0, ipcPort=44319, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-26T10:27:52,319 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfd8a1c350ad992b3 with lease ID 0xc3f1d4fea88d0428: Processing first storage report for DS-b04b2d0b-c97f-45c4-9206-b37fcb1beca3 from datanode DatanodeRegistration(127.0.0.1:44797, datanodeUuid=d8f6f3df-6727-4385-80f7-bf951ac2cfd8, infoPort=43123, infoSecurePort=0, ipcPort=44319, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772) 2024-11-26T10:27:52,319 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfd8a1c350ad992b3 with lease ID 0xc3f1d4fea88d0428: from storage DS-b04b2d0b-c97f-45c4-9206-b37fcb1beca3 node DatanodeRegistration(127.0.0.1:44797, datanodeUuid=d8f6f3df-6727-4385-80f7-bf951ac2cfd8, infoPort=43123, infoSecurePort=0, ipcPort=44319, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:27:52,331 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@72e5785b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/java.io.tmpdir/jetty-localhost-38597-hadoop-hdfs-3_4_1-tests_jar-_-any-13697402069078291469/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:27:52,332 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@68370875{HTTP/1.1, (http/1.1)}{localhost:38597} 2024-11-26T10:27:52,332 INFO [Time-limited test {}] server.Server(415): Started @104479ms 2024-11-26T10:27:52,333 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
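
Between "Minicluster is down" and this point the utility has rebuilt the test environment from scratch: it re-points hadoop.log.dir, hadoop.tmp.dir and the various DFS/YARN directories at a new test-data folder, starts a namenode and two datanodes with their Jetty web UIs, and waits for the first block reports to arrive. A minimal sketch of the call that produces this sequence, assuming the StartMiniClusterOption builder mirrors the option string printed in the log (the launcher class itself is hypothetical):

    // Hypothetical launcher; the option values are copied from StartMiniClusterOption{...} above.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public final class RestartMiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)         // one HMaster
            .numRegionServers(1)   // one region server
            .numDataNodes(2)       // the two datanodes whose block reports appear above
            .numZkServers(1)       // a single MiniZooKeeperCluster node
            .build();
        util.startMiniCluster(option);   // MiniZK, then MiniDFS, then the HBase daemons
        try {
          // ... test logic against the fresh cluster ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
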
2024-11-26T10:27:52,449 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data3/current/BP-482080985-172.17.0.2-1732616871772/current, will proceed with Du for space computation calculation, 2024-11-26T10:27:52,450 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data4/current/BP-482080985-172.17.0.2-1732616871772/current, will proceed with Du for space computation calculation, 2024-11-26T10:27:52,467 WARN [Thread-671 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-26T10:27:52,469 INFO [regionserver/94eedbb855cf:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-26T10:27:52,469 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa2ebd0e6cd51da97 with lease ID 0xc3f1d4fea88d0429: Processing first storage report for DS-f8481b4f-a002-4859-823b-17ed26ca74f7 from datanode DatanodeRegistration(127.0.0.1:46551, datanodeUuid=0d9021ae-a61c-4c8b-bb09-5a5ed31a0d11, infoPort=39029, infoSecurePort=0, ipcPort=39563, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772) 2024-11-26T10:27:52,469 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa2ebd0e6cd51da97 with lease ID 0xc3f1d4fea88d0429: from storage DS-f8481b4f-a002-4859-823b-17ed26ca74f7 node DatanodeRegistration(127.0.0.1:46551, datanodeUuid=0d9021ae-a61c-4c8b-bb09-5a5ed31a0d11, infoPort=39029, infoSecurePort=0, ipcPort=39563, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:27:52,470 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa2ebd0e6cd51da97 with lease ID 0xc3f1d4fea88d0429: Processing first storage report for DS-1564ff9f-fbc1-4533-94ec-c95f83bfd18f from datanode DatanodeRegistration(127.0.0.1:46551, datanodeUuid=0d9021ae-a61c-4c8b-bb09-5a5ed31a0d11, infoPort=39029, infoSecurePort=0, ipcPort=39563, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772) 2024-11-26T10:27:52,470 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa2ebd0e6cd51da97 with lease ID 0xc3f1d4fea88d0429: from storage DS-1564ff9f-fbc1-4533-94ec-c95f83bfd18f node DatanodeRegistration(127.0.0.1:46551, datanodeUuid=0d9021ae-a61c-4c8b-bb09-5a5ed31a0d11, infoPort=39029, infoSecurePort=0, ipcPort=39563, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:27:52,561 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a 2024-11-26T10:27:52,563 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, 
dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/zookeeper_0, clientPort=58673, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-26T10:27:52,564 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58673 2024-11-26T10:27:52,565 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:27:52,566 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:27:52,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44797 is added to blk_1073741825_1001 (size=7) 2024-11-26T10:27:52,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46551 is added to blk_1073741825_1001 (size=7) 2024-11-26T10:27:52,578 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf with version=8 2024-11-26T10:27:52,578 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/hbase-staging 2024-11-26T10:27:52,581 INFO [Time-limited test {}] client.ConnectionUtils(128): master/94eedbb855cf:0 server-side Connection retries=45 2024-11-26T10:27:52,581 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:27:52,581 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-26T10:27:52,581 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-26T10:27:52,582 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:27:52,582 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-26T10:27:52,582 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, 
hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-26T10:27:52,582 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-26T10:27:52,583 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44935 2024-11-26T10:27:52,584 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44935 connecting to ZooKeeper ensemble=127.0.0.1:58673 2024-11-26T10:27:52,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:449350x0, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-26T10:27:52,592 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44935-0x10153d14d020000 connected 2024-11-26T10:27:52,610 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:27:52,612 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:27:52,615 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:27:52,615 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf, hbase.cluster.distributed=false 2024-11-26T10:27:52,617 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-26T10:27:52,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44935 2024-11-26T10:27:52,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44935 2024-11-26T10:27:52,621 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44935 2024-11-26T10:27:52,621 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44935 2024-11-26T10:27:52,621 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44935 2024-11-26T10:27:52,640 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/94eedbb855cf:0 server-side Connection retries=45 2024-11-26T10:27:52,640 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:27:52,640 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-26T10:27:52,640 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 
readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-26T10:27:52,641 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:27:52,641 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-26T10:27:52,641 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-26T10:27:52,641 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-26T10:27:52,642 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35079 2024-11-26T10:27:52,643 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35079 connecting to ZooKeeper ensemble=127.0.0.1:58673 2024-11-26T10:27:52,644 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:27:52,647 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:27:52,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:350790x0, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-26T10:27:52,653 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35079-0x10153d14d020001 connected 2024-11-26T10:27:52,653 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:27:52,654 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-26T10:27:52,654 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-26T10:27:52,655 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-26T10:27:52,656 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-26T10:27:52,657 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35079 2024-11-26T10:27:52,657 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35079 2024-11-26T10:27:52,657 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35079 2024-11-26T10:27:52,657 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35079 2024-11-26T10:27:52,658 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35079 2024-11-26T10:27:52,670 DEBUG [M:0;94eedbb855cf:44935 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;94eedbb855cf:44935 2024-11-26T10:27:52,671 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/94eedbb855cf,44935,1732616872581 2024-11-26T10:27:52,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:27:52,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:27:52,673 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/94eedbb855cf,44935,1732616872581 2024-11-26T10:27:52,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-26T10:27:52,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:52,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:52,675 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-26T10:27:52,675 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/94eedbb855cf,44935,1732616872581 from backup master directory 2024-11-26T10:27:52,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/94eedbb855cf,44935,1732616872581 2024-11-26T10:27:52,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:27:52,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:27:52,677 WARN [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by 
start scripts (Longer MTTR!) 2024-11-26T10:27:52,677 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=94eedbb855cf,44935,1732616872581 2024-11-26T10:27:52,681 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/hbase.id] with ID: df6bd151-1bcf-4e05-b3dc-e60be01c9d2d 2024-11-26T10:27:52,681 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/.tmp/hbase.id 2024-11-26T10:27:52,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46551 is added to blk_1073741826_1002 (size=42) 2024-11-26T10:27:52,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44797 is added to blk_1073741826_1002 (size=42) 2024-11-26T10:27:52,691 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/.tmp/hbase.id]:[hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/hbase.id] 2024-11-26T10:27:52,705 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:27:52,705 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-26T10:27:52,707 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
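
At this point the master has won the active-master race in ZooKeeper (/hbase/master), written the cluster ID file (hbase.id) into the root directory, and found zero table descriptors on the filesystem. A test would typically only now open a client connection against the cluster; the probe below is a hedged sketch that reuses the hypothetical HBaseTestingUtil instance from the earlier sketches and the standard client API (ConnectionFactory, Admin):

    // Hypothetical helper; 'util' is the HBaseTestingUtil from the sketches above.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class ClusterProbeSketch {
      static int countUserTables(HBaseTestingUtil util) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
             Admin admin = conn.getAdmin()) {
          // A freshly started mini cluster has no user tables, which matches
          // "Fetched table descriptors(size=0)" in the log above.
          return admin.listTableDescriptors().size();
        }
      }
    }
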
2024-11-26T10:27:52,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:52,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:52,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44797 is added to blk_1073741827_1003 (size=196) 2024-11-26T10:27:52,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46551 is added to blk_1073741827_1003 (size=196) 2024-11-26T10:27:52,718 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:27:52,719 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-26T10:27:52,720 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:27:52,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44797 is added to blk_1073741828_1004 (size=1189) 2024-11-26T10:27:52,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46551 is added to blk_1073741828_1004 (size=1189) 2024-11-26T10:27:52,729 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store 2024-11-26T10:27:52,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46551 is added to blk_1073741829_1005 (size=34) 2024-11-26T10:27:52,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44797 is added to blk_1073741829_1005 (size=34) 2024-11-26T10:27:52,736 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:27:52,737 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-26T10:27:52,737 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:27:52,737 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:27:52,737 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-26T10:27:52,737 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:27:52,737 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
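
The MasterRegion log above spells out the full schema of the internal 'master:store' table: four column families (info, proc, rs, state), with 'info' kept in memory, ROW_INDEX_V1 block encoding, a ROWCOL bloom filter, three versions and 8 KB blocks, while the other families stay at essentially default settings with a single version. The same attributes map directly onto the public descriptor builders; the sketch below mirrors the 'info' and 'proc' families for a hypothetical user table (the name "test:example" is illustrative, and this is not how the master region itself is created):

    // Illustrative schema builder; only the attribute values are taken from the log above.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class StoreLikeDescriptorSketch {
      static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("test", "example"))
            // Mirrors the 'info' family: 3 versions, ROW_INDEX_V1 encoding,
            // ROWCOL bloom filter, in-memory, 8 KB blocks.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build())
            // Mirrors the 'proc' family: single version, otherwise defaults.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)
                .build())
            .build();
      }
    }
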
2024-11-26T10:27:52,737 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732616872736Disabling compacts and flushes for region at 1732616872736Disabling writes for close at 1732616872737 (+1 ms)Writing region close event to WAL at 1732616872737Closed at 1732616872737 2024-11-26T10:27:52,738 WARN [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/.initializing 2024-11-26T10:27:52,738 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/WALs/94eedbb855cf,44935,1732616872581 2024-11-26T10:27:52,741 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C44935%2C1732616872581, suffix=, logDir=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/WALs/94eedbb855cf,44935,1732616872581, archiveDir=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/oldWALs, maxLogs=10 2024-11-26T10:27:52,741 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C44935%2C1732616872581.1732616872741 2024-11-26T10:27:52,750 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/WALs/94eedbb855cf,44935,1732616872581/94eedbb855cf%2C44935%2C1732616872581.1732616872741 2024-11-26T10:27:52,754 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43123:43123),(127.0.0.1/127.0.0.1:39029:39029)] 2024-11-26T10:27:52,756 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:27:52,756 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:27:52,756 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:52,756 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:52,759 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:52,761 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-26T10:27:52,761 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:52,762 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:27:52,762 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:52,763 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-26T10:27:52,763 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:52,764 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:27:52,764 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:52,765 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-26T10:27:52,765 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:52,766 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:27:52,766 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:52,767 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-26T10:27:52,767 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:52,767 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:27:52,768 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:52,768 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:52,769 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:52,770 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:52,770 DEBUG [master/94eedbb855cf:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:52,771 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-26T10:27:52,772 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:27:52,774 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:27:52,775 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=757431, jitterRate=-0.03687676787376404}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-26T10:27:52,776 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732616872756Initializing all the Stores at 1732616872757 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616872757Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616872759 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616872759Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616872759Cleaning up temporary data from old regions at 1732616872770 (+11 ms)Region opened successfully at 1732616872776 (+6 ms) 2024-11-26T10:27:52,776 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-26T10:27:52,780 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@735edfb8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=94eedbb855cf/172.17.0.2:0 2024-11-26T10:27:52,781 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-26T10:27:52,781 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-26T10:27:52,781 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-26T10:27:52,782 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-26T10:27:52,782 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-26T10:27:52,783 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-26T10:27:52,783 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-26T10:27:52,785 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-26T10:27:52,786 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-26T10:27:52,787 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-26T10:27:52,787 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-26T10:27:52,788 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-26T10:27:52,789 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-26T10:27:52,790 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-26T10:27:52,790 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-26T10:27:52,792 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-26T10:27:52,793 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-26T10:27:52,795 DEBUG 
[master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-26T10:27:52,797 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-26T10:27:52,798 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-26T10:27:52,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-26T10:27:52,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-26T10:27:52,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:52,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:52,800 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=94eedbb855cf,44935,1732616872581, sessionid=0x10153d14d020000, setting cluster-up flag (Was=false) 2024-11-26T10:27:52,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:52,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:52,809 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-26T10:27:52,812 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=94eedbb855cf,44935,1732616872581 2024-11-26T10:27:52,816 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:52,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:52,823 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-26T10:27:52,825 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=94eedbb855cf,44935,1732616872581 2024-11-26T10:27:52,827 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-26T10:27:52,829 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-26T10:27:52,829 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-26T10:27:52,830 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-26T10:27:52,830 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 94eedbb855cf,44935,1732616872581 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-26T10:27:52,832 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:27:52,832 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:27:52,832 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:27:52,832 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:27:52,832 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/94eedbb855cf:0, corePoolSize=10, maxPoolSize=10 2024-11-26T10:27:52,832 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:52,832 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/94eedbb855cf:0, corePoolSize=2, maxPoolSize=2 2024-11-26T10:27:52,832 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/94eedbb855cf:0, corePoolSize=1, 
maxPoolSize=1 2024-11-26T10:27:52,838 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:27:52,838 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-26T10:27:52,840 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:52,840 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-26T10:27:52,848 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732616902848 2024-11-26T10:27:52,848 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-26T10:27:52,849 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-26T10:27:52,849 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-26T10:27:52,849 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-26T10:27:52,849 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-26T10:27:52,849 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-26T10:27:52,849 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:52,850 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-26T10:27:52,850 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-26T10:27:52,850 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-26T10:27:52,852 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-26T10:27:52,852 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-26T10:27:52,864 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732616872852,5,FailOnTimeoutGroup] 2024-11-26T10:27:52,865 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732616872865,5,FailOnTimeoutGroup] 2024-11-26T10:27:52,865 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:52,865 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-26T10:27:52,865 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:52,865 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
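The master line just above notes that reopening regions with a very high storeFileRefCount stays disabled because hbase.regions.recovery.store.file.ref.count is left at its default of 0. As an illustrative sketch only (the property name is taken from the log line itself; the class name and the value 256 are arbitrary examples, not recommendations), such a threshold could be supplied through the Configuration like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountThresholdSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // 0 (the default, as seen in the log) keeps the feature off; any value > 0 enables it.
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 256); // arbitrary example threshold
            System.out.println("threshold = " + conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
        }
    }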
2024-11-26T10:27:52,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46551 is added to blk_1073741831_1007 (size=1321) 2024-11-26T10:27:52,868 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.HRegionServer(746): ClusterId : df6bd151-1bcf-4e05-b3dc-e60be01c9d2d 2024-11-26T10:27:52,868 DEBUG [RS:0;94eedbb855cf:35079 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-26T10:27:52,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44797 is added to blk_1073741831_1007 (size=1321) 2024-11-26T10:27:52,871 DEBUG [RS:0;94eedbb855cf:35079 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-26T10:27:52,871 DEBUG [RS:0;94eedbb855cf:35079 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-26T10:27:52,874 DEBUG [RS:0;94eedbb855cf:35079 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-26T10:27:52,874 DEBUG [RS:0;94eedbb855cf:35079 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21f15e9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=94eedbb855cf/172.17.0.2:0 2024-11-26T10:27:52,894 DEBUG [RS:0;94eedbb855cf:35079 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;94eedbb855cf:35079 2024-11-26T10:27:52,894 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-26T10:27:52,894 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-26T10:27:52,894 DEBUG [RS:0;94eedbb855cf:35079 {}] regionserver.HRegionServer(832): About to register with Master. 
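The region server above has just picked up the ClusterId and is about to register with the master. For reference, the same cluster identity and live-server list can be read from a client through the Admin API; this is a minimal sketch assuming client configuration (hbase-site.xml / ZooKeeper quorum) matching the cluster captured in this log, with a hypothetical class name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterIdSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                ClusterMetrics metrics = admin.getClusterMetrics();
                // Same identifier the region server logs as "ClusterId" while registering.
                System.out.println("clusterId = " + metrics.getClusterId());
                System.out.println("live servers = " + metrics.getLiveServerMetrics().keySet());
            }
        }
    }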
2024-11-26T10:27:52,895 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.HRegionServer(2659): reportForDuty to master=94eedbb855cf,44935,1732616872581 with port=35079, startcode=1732616872640 2024-11-26T10:27:52,896 DEBUG [RS:0;94eedbb855cf:35079 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-26T10:27:52,901 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44121, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-26T10:27:52,902 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44935 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 94eedbb855cf,35079,1732616872640 2024-11-26T10:27:52,902 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44935 {}] master.ServerManager(517): Registering regionserver=94eedbb855cf,35079,1732616872640 2024-11-26T10:27:52,905 DEBUG [RS:0;94eedbb855cf:35079 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf 2024-11-26T10:27:52,905 DEBUG [RS:0;94eedbb855cf:35079 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43805 2024-11-26T10:27:52,905 DEBUG [RS:0;94eedbb855cf:35079 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-26T10:27:52,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-26T10:27:52,909 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [94eedbb855cf,35079,1732616872640] 2024-11-26T10:27:52,920 DEBUG [RS:0;94eedbb855cf:35079 {}] zookeeper.ZKUtil(111): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/94eedbb855cf,35079,1732616872640 2024-11-26T10:27:52,920 WARN [RS:0;94eedbb855cf:35079 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-26T10:27:52,920 INFO [RS:0;94eedbb855cf:35079 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:27:52,920 DEBUG [RS:0;94eedbb855cf:35079 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640 2024-11-26T10:27:52,932 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-26T10:27:52,935 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-26T10:27:52,940 INFO [RS:0;94eedbb855cf:35079 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-26T10:27:52,941 INFO [RS:0;94eedbb855cf:35079 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
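The MemStoreFlusher and PressureAwareCompactionThroughputController lines above report derived values: the global memstore limit is a fraction of the JVM heap, and the low-water mark is roughly 95% of that limit (836 M of 880 M here). As a rough sketch, with property names that are the standard ones to the best of my knowledge and should be verified against the HBase version in use (3.0.0-beta-2 in this log), they correspond to something like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushAndCompactionThroughputSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Fraction of the heap usable by all memstores; 880 M in the log is this
            // fraction of the test JVM heap, with the ~95% low-water mark derived from it.
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            // Pressure-aware compaction throughput bounds (bytes/second), matching the
            // "higher bound: 100.00 MB/second, lower bound 50.00 MB/second" line above.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
        }
    }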
2024-11-26T10:27:52,941 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-26T10:27:52,942 INFO [RS:0;94eedbb855cf:35079 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-26T10:27:52,943 INFO [RS:0;94eedbb855cf:35079 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:52,943 DEBUG [RS:0;94eedbb855cf:35079 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:52,943 DEBUG [RS:0;94eedbb855cf:35079 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:52,943 DEBUG [RS:0;94eedbb855cf:35079 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:52,943 DEBUG [RS:0;94eedbb855cf:35079 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:52,943 DEBUG [RS:0;94eedbb855cf:35079 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:52,943 DEBUG [RS:0;94eedbb855cf:35079 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/94eedbb855cf:0, corePoolSize=2, maxPoolSize=2 2024-11-26T10:27:52,943 DEBUG [RS:0;94eedbb855cf:35079 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:52,943 DEBUG [RS:0;94eedbb855cf:35079 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:52,943 DEBUG [RS:0;94eedbb855cf:35079 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:52,943 DEBUG [RS:0;94eedbb855cf:35079 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:52,944 DEBUG [RS:0;94eedbb855cf:35079 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:52,944 DEBUG [RS:0;94eedbb855cf:35079 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:52,944 DEBUG [RS:0;94eedbb855cf:35079 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/94eedbb855cf:0, corePoolSize=3, maxPoolSize=3 2024-11-26T10:27:52,944 DEBUG [RS:0;94eedbb855cf:35079 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0, corePoolSize=3, maxPoolSize=3 2024-11-26T10:27:52,949 INFO [RS:0;94eedbb855cf:35079 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-26T10:27:52,949 INFO [RS:0;94eedbb855cf:35079 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:52,949 INFO [RS:0;94eedbb855cf:35079 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:52,949 INFO [RS:0;94eedbb855cf:35079 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:52,949 INFO [RS:0;94eedbb855cf:35079 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:52,949 INFO [RS:0;94eedbb855cf:35079 {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,35079,1732616872640-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-26T10:27:52,973 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-26T10:27:52,974 INFO [RS:0;94eedbb855cf:35079 {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,35079,1732616872640-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:52,974 INFO [RS:0;94eedbb855cf:35079 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:52,974 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.Replication(171): 94eedbb855cf,35079,1732616872640 started 2024-11-26T10:27:52,996 INFO [RS:0;94eedbb855cf:35079 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:52,996 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.HRegionServer(1482): Serving as 94eedbb855cf,35079,1732616872640, RpcServer on 94eedbb855cf/172.17.0.2:35079, sessionid=0x10153d14d020001 2024-11-26T10:27:52,997 DEBUG [RS:0;94eedbb855cf:35079 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-26T10:27:52,997 DEBUG [RS:0;94eedbb855cf:35079 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 94eedbb855cf,35079,1732616872640 2024-11-26T10:27:52,997 DEBUG [RS:0;94eedbb855cf:35079 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '94eedbb855cf,35079,1732616872640' 2024-11-26T10:27:52,997 DEBUG [RS:0;94eedbb855cf:35079 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-26T10:27:52,998 DEBUG [RS:0;94eedbb855cf:35079 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-26T10:27:52,998 DEBUG [RS:0;94eedbb855cf:35079 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-26T10:27:52,998 DEBUG [RS:0;94eedbb855cf:35079 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-26T10:27:52,998 DEBUG [RS:0;94eedbb855cf:35079 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 94eedbb855cf,35079,1732616872640 2024-11-26T10:27:52,998 DEBUG [RS:0;94eedbb855cf:35079 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '94eedbb855cf,35079,1732616872640' 2024-11-26T10:27:52,998 DEBUG [RS:0;94eedbb855cf:35079 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-26T10:27:52,999 DEBUG 
[RS:0;94eedbb855cf:35079 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-26T10:27:52,999 DEBUG [RS:0;94eedbb855cf:35079 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-26T10:27:53,000 INFO [RS:0;94eedbb855cf:35079 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-26T10:27:53,000 INFO [RS:0;94eedbb855cf:35079 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-26T10:27:53,102 INFO [RS:0;94eedbb855cf:35079 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C35079%2C1732616872640, suffix=, logDir=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640, archiveDir=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/oldWALs, maxLogs=32 2024-11-26T10:27:53,103 INFO [RS:0;94eedbb855cf:35079 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C35079%2C1732616872640.1732616873103 2024-11-26T10:27:53,111 INFO [RS:0;94eedbb855cf:35079 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616873103 2024-11-26T10:27:53,112 DEBUG [RS:0;94eedbb855cf:35079 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43123:43123),(127.0.0.1/127.0.0.1:39029:39029)] 2024-11-26T10:27:53,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:27:53,253 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:27:53,269 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-26T10:27:53,269 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf 2024-11-26T10:27:53,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46551 is added to blk_1073741833_1009 (size=32) 2024-11-26T10:27:53,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44797 is added to blk_1073741833_1009 (size=32) 2024-11-26T10:27:53,277 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:27:53,279 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-26T10:27:53,280 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-26T10:27:53,281 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:53,281 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:27:53,281 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-26T10:27:53,283 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-26T10:27:53,283 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:53,284 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:27:53,284 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-26T10:27:53,285 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-26T10:27:53,285 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:53,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:27:53,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-26T10:27:53,287 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-26T10:27:53,287 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:53,288 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:27:53,288 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-26T10:27:53,289 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740 2024-11-26T10:27:53,289 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740 2024-11-26T10:27:53,290 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-26T10:27:53,290 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-26T10:27:53,291 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
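The table-descriptor dumps above (for the master's local store and for hbase:meta) are built internally by the master, but descriptors of the same shape can be expressed with the public client builders. The sketch below mirrors only the 'info' family attributes shown in the log (VERSIONS=3, IN_MEMORY=true, BLOCKSIZE=8192, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOOMFILTER=ROWCOL) for a hypothetical user table named "example_table", not for hbase:meta itself:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
        public static void main(String[] args) {
            // Mirrors the 'info' family attributes logged above.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8192)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build();
            // "example_table" is a placeholder; hbase:meta itself is created by InitMetaProcedure, as logged.
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example_table"))
                .setColumnFamily(info)
                .build();
            System.out.println(td);
        }
    }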
2024-11-26T10:27:53,292 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-26T10:27:53,294 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:27:53,295 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=866512, jitterRate=0.10182817280292511}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:27:53,296 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732616873278Initializing all the Stores at 1732616873278Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616873278Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616873279 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616873279Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616873279Cleaning up temporary data from old regions at 1732616873290 (+11 ms)Region opened successfully at 1732616873295 (+5 ms) 2024-11-26T10:27:53,296 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-26T10:27:53,296 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-26T10:27:53,296 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-26T10:27:53,296 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-26T10:27:53,296 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-26T10:27:53,296 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-26T10:27:53,296 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732616873296Disabling compacts and flushes for region at 1732616873296Disabling writes for close at 1732616873296Writing region close 
event to WAL at 1732616873296Closed at 1732616873296 2024-11-26T10:27:53,298 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:27:53,298 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-26T10:27:53,298 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-26T10:27:53,300 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-26T10:27:53,301 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-26T10:27:53,451 DEBUG [94eedbb855cf:44935 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-26T10:27:53,452 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=94eedbb855cf,35079,1732616872640 2024-11-26T10:27:53,454 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 94eedbb855cf,35079,1732616872640, state=OPENING 2024-11-26T10:27:53,457 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-26T10:27:53,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:53,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:27:53,460 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-26T10:27:53,460 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:27:53,460 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=94eedbb855cf,35079,1732616872640}] 2024-11-26T10:27:53,460 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:27:53,614 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-26T10:27:53,616 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59693, version=3.0.0-beta-2-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-26T10:27:53,620 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-26T10:27:53,621 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:27:53,622 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C35079%2C1732616872640.meta, suffix=.meta, logDir=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640, archiveDir=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/oldWALs, maxLogs=32 2024-11-26T10:27:53,623 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta 2024-11-26T10:27:53,628 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta 2024-11-26T10:27:53,634 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39029:39029),(127.0.0.1/127.0.0.1:43123:43123)] 2024-11-26T10:27:53,637 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:27:53,637 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-26T10:27:53,637 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-26T10:27:53,637 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
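Once the assignment above completes, the meta location written under /hbase/meta-region-server is what clients resolve. A small client-side sketch (assuming connection settings matching this cluster; the class name is illustrative) that asks for the current hbase:meta location:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
                HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
                // For the run captured in this log this would resolve to 94eedbb855cf,35079,1732616872640.
                System.out.println("hbase:meta is on " + loc.getServerName());
            }
        }
    }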
2024-11-26T10:27:53,637 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-26T10:27:53,637 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:27:53,638 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-26T10:27:53,638 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-26T10:27:53,639 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-26T10:27:53,640 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-26T10:27:53,640 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:53,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:27:53,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-26T10:27:53,641 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-26T10:27:53,642 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:53,642 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:27:53,642 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-26T10:27:53,643 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-26T10:27:53,643 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:53,643 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:27:53,643 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-26T10:27:53,644 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-26T10:27:53,644 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:53,645 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-26T10:27:53,645 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-26T10:27:53,646 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740 2024-11-26T10:27:53,647 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740 2024-11-26T10:27:53,648 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-26T10:27:53,648 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-26T10:27:53,648 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-26T10:27:53,649 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-26T10:27:53,650 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=700271, jitterRate=-0.10956031084060669}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:27:53,650 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-26T10:27:53,651 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732616873638Writing region info on filesystem at 1732616873638Initializing all the Stores at 1732616873639 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616873639Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616873639Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616873639Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616873639Cleaning up temporary data from old regions at 1732616873648 (+9 ms)Running coprocessor post-open hooks at 1732616873650 (+2 ms)Region opened successfully at 1732616873651 (+1 ms) 2024-11-26T10:27:53,652 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732616873614 2024-11-26T10:27:53,655 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-26T10:27:53,655 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-26T10:27:53,656 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=94eedbb855cf,35079,1732616872640 2024-11-26T10:27:53,657 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 94eedbb855cf,35079,1732616872640, state=OPEN 2024-11-26T10:27:53,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-26T10:27:53,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-26T10:27:53,661 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=94eedbb855cf,35079,1732616872640 2024-11-26T10:27:53,661 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:27:53,661 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:27:53,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-26T10:27:53,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=94eedbb855cf,35079,1732616872640 in 201 msec 2024-11-26T10:27:53,667 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-26T10:27:53,667 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 366 msec 2024-11-26T10:27:53,668 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:27:53,668 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-26T10:27:53,669 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-26T10:27:53,669 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=94eedbb855cf,35079,1732616872640, seqNum=-1] 2024-11-26T10:27:53,670 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:27:53,671 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53821, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:27:53,676 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 847 msec 2024-11-26T10:27:53,677 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732616873676, completionTime=-1 2024-11-26T10:27:53,677 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-26T10:27:53,677 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-26T10:27:53,678 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-26T10:27:53,679 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732616933678 2024-11-26T10:27:53,679 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732616993679 2024-11-26T10:27:53,679 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-26T10:27:53,679 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,44935,1732616872581-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:53,679 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,44935,1732616872581-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:53,679 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,44935,1732616872581-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:53,679 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-94eedbb855cf:44935, period=300000, unit=MILLISECONDS is enabled. 
2024-11-26T10:27:53,679 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:53,679 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:53,681 DEBUG [master/94eedbb855cf:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-26T10:27:53,683 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.006sec 2024-11-26T10:27:53,683 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-26T10:27:53,683 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-26T10:27:53,683 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-26T10:27:53,683 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-26T10:27:53,683 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-26T10:27:53,683 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,44935,1732616872581-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-26T10:27:53,683 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,44935,1732616872581-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-26T10:27:53,685 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-26T10:27:53,686 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-26T10:27:53,686 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,44935,1732616872581-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-26T10:27:53,765 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-26T10:27:53,766 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cae84f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:27:53,766 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 94eedbb855cf,44935,-1 for getting cluster id 2024-11-26T10:27:53,767 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-26T10:27:53,768 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'df6bd151-1bcf-4e05-b3dc-e60be01c9d2d' 2024-11-26T10:27:53,769 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-26T10:27:53,769 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "df6bd151-1bcf-4e05-b3dc-e60be01c9d2d" 2024-11-26T10:27:53,769 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f2f8d94, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:27:53,769 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [94eedbb855cf,44935,-1] 2024-11-26T10:27:53,769 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-26T10:27:53,770 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:27:53,770 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:27:53,772 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33734, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-26T10:27:53,773 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd7b3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:27:53,773 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-26T10:27:53,774 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=94eedbb855cf,35079,1732616872640, seqNum=-1] 2024-11-26T10:27:53,775 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:27:53,776 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45554, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:27:53,778 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=94eedbb855cf,44935,1732616872581 2024-11-26T10:27:53,778 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:27:53,781 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-26T10:27:53,786 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:27:53,788 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:27:53,789 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:27:53,795 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-26T10:27:53,797 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/94eedbb855cf:0 server-side Connection retries=45 2024-11-26T10:27:53,797 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:27:53,797 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-26T10:27:53,797 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-26T10:27:53,797 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:27:53,797 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-26T10:27:53,797 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-26T10:27:53,797 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-26T10:27:53,798 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39711 2024-11-26T10:27:53,799 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39711 connecting to ZooKeeper ensemble=127.0.0.1:58673 2024-11-26T10:27:53,799 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:27:53,801 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:27:53,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:397110x0, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-26T10:27:53,811 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39711-0x10153d14d020002 connected 2024-11-26T10:27:53,811 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:39711-0x10153d14d020002, quorum=127.0.0.1:58673, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-26T10:27:53,811 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-26T10:27:53,812 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-26T10:27:53,812 DEBUG [Time-limited test 
{}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-26T10:27:53,813 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:39711-0x10153d14d020002, quorum=127.0.0.1:58673, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-26T10:27:53,814 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39711-0x10153d14d020002, quorum=127.0.0.1:58673, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-26T10:27:53,815 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39711 2024-11-26T10:27:53,815 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39711 2024-11-26T10:27:53,815 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39711 2024-11-26T10:27:53,816 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39711 2024-11-26T10:27:53,816 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39711 2024-11-26T10:27:53,817 INFO [RS:1;94eedbb855cf:39711 {}] regionserver.HRegionServer(746): ClusterId : df6bd151-1bcf-4e05-b3dc-e60be01c9d2d 2024-11-26T10:27:53,817 DEBUG [RS:1;94eedbb855cf:39711 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-26T10:27:53,819 DEBUG [RS:1;94eedbb855cf:39711 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-26T10:27:53,819 DEBUG [RS:1;94eedbb855cf:39711 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-26T10:27:53,821 DEBUG [RS:1;94eedbb855cf:39711 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-26T10:27:53,822 DEBUG [RS:1;94eedbb855cf:39711 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a09c68c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=94eedbb855cf/172.17.0.2:0 2024-11-26T10:27:53,833 DEBUG [RS:1;94eedbb855cf:39711 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;94eedbb855cf:39711 2024-11-26T10:27:53,833 INFO [RS:1;94eedbb855cf:39711 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-26T10:27:53,833 INFO [RS:1;94eedbb855cf:39711 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-26T10:27:53,833 DEBUG [RS:1;94eedbb855cf:39711 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-26T10:27:53,834 INFO [RS:1;94eedbb855cf:39711 {}] regionserver.HRegionServer(2659): reportForDuty to master=94eedbb855cf,44935,1732616872581 with port=39711, startcode=1732616873796 2024-11-26T10:27:53,834 DEBUG [RS:1;94eedbb855cf:39711 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-26T10:27:53,836 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44105, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-26T10:27:53,836 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44935 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 94eedbb855cf,39711,1732616873796 2024-11-26T10:27:53,836 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44935 {}] master.ServerManager(517): Registering regionserver=94eedbb855cf,39711,1732616873796 2024-11-26T10:27:53,838 DEBUG [RS:1;94eedbb855cf:39711 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf 2024-11-26T10:27:53,838 DEBUG [RS:1;94eedbb855cf:39711 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43805 2024-11-26T10:27:53,838 DEBUG [RS:1;94eedbb855cf:39711 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-26T10:27:53,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-26T10:27:53,840 DEBUG [RS:1;94eedbb855cf:39711 {}] zookeeper.ZKUtil(111): regionserver:39711-0x10153d14d020002, quorum=127.0.0.1:58673, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/94eedbb855cf,39711,1732616873796 2024-11-26T10:27:53,840 WARN [RS:1;94eedbb855cf:39711 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-26T10:27:53,840 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [94eedbb855cf,39711,1732616873796] 2024-11-26T10:27:53,840 INFO [RS:1;94eedbb855cf:39711 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:27:53,841 DEBUG [RS:1;94eedbb855cf:39711 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796 2024-11-26T10:27:53,844 INFO [RS:1;94eedbb855cf:39711 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-26T10:27:53,845 INFO [RS:1;94eedbb855cf:39711 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-26T10:27:53,846 INFO [RS:1;94eedbb855cf:39711 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-26T10:27:53,846 INFO [RS:1;94eedbb855cf:39711 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-26T10:27:53,846 INFO [RS:1;94eedbb855cf:39711 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-26T10:27:53,847 INFO [RS:1;94eedbb855cf:39711 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-26T10:27:53,847 INFO [RS:1;94eedbb855cf:39711 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:53,847 DEBUG [RS:1;94eedbb855cf:39711 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:53,847 DEBUG [RS:1;94eedbb855cf:39711 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:53,847 DEBUG [RS:1;94eedbb855cf:39711 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:53,847 DEBUG [RS:1;94eedbb855cf:39711 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:53,847 DEBUG [RS:1;94eedbb855cf:39711 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:53,847 DEBUG [RS:1;94eedbb855cf:39711 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/94eedbb855cf:0, corePoolSize=2, maxPoolSize=2 2024-11-26T10:27:53,847 DEBUG [RS:1;94eedbb855cf:39711 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:53,847 DEBUG [RS:1;94eedbb855cf:39711 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:53,847 DEBUG [RS:1;94eedbb855cf:39711 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:53,847 DEBUG [RS:1;94eedbb855cf:39711 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:53,847 DEBUG [RS:1;94eedbb855cf:39711 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:53,848 DEBUG [RS:1;94eedbb855cf:39711 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:27:53,848 DEBUG [RS:1;94eedbb855cf:39711 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/94eedbb855cf:0, corePoolSize=3, maxPoolSize=3 2024-11-26T10:27:53,848 DEBUG [RS:1;94eedbb855cf:39711 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0, corePoolSize=3, maxPoolSize=3 2024-11-26T10:27:53,848 INFO [RS:1;94eedbb855cf:39711 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-26T10:27:53,848 INFO [RS:1;94eedbb855cf:39711 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:53,848 INFO [RS:1;94eedbb855cf:39711 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:53,848 INFO [RS:1;94eedbb855cf:39711 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:53,848 INFO [RS:1;94eedbb855cf:39711 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:53,848 INFO [RS:1;94eedbb855cf:39711 {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,39711,1732616873796-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-26T10:27:53,863 INFO [RS:1;94eedbb855cf:39711 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-26T10:27:53,863 INFO [RS:1;94eedbb855cf:39711 {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,39711,1732616873796-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:53,863 INFO [RS:1;94eedbb855cf:39711 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:53,863 INFO [RS:1;94eedbb855cf:39711 {}] regionserver.Replication(171): 94eedbb855cf,39711,1732616873796 started 2024-11-26T10:27:53,881 INFO [RS:1;94eedbb855cf:39711 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:27:53,881 INFO [RS:1;94eedbb855cf:39711 {}] regionserver.HRegionServer(1482): Serving as 94eedbb855cf,39711,1732616873796, RpcServer on 94eedbb855cf/172.17.0.2:39711, sessionid=0x10153d14d020002 2024-11-26T10:27:53,881 DEBUG [RS:1;94eedbb855cf:39711 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-26T10:27:53,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;94eedbb855cf:39711,5,FailOnTimeoutGroup] 2024-11-26T10:27:53,881 DEBUG [RS:1;94eedbb855cf:39711 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 94eedbb855cf,39711,1732616873796 2024-11-26T10:27:53,881 DEBUG [RS:1;94eedbb855cf:39711 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '94eedbb855cf,39711,1732616873796' 2024-11-26T10:27:53,881 DEBUG [RS:1;94eedbb855cf:39711 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-26T10:27:53,882 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-26T10:27:53,882 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-26T10:27:53,889 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 94eedbb855cf,44935,1732616872581 2024-11-26T10:27:53,889 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1e2d45f3 2024-11-26T10:27:53,890 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-26T10:27:53,894 DEBUG [RS:1;94eedbb855cf:39711 {}] 
procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-26T10:27:53,895 DEBUG [RS:1;94eedbb855cf:39711 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-26T10:27:53,895 DEBUG [RS:1;94eedbb855cf:39711 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-26T10:27:53,895 DEBUG [RS:1;94eedbb855cf:39711 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 94eedbb855cf,39711,1732616873796 2024-11-26T10:27:53,895 DEBUG [RS:1;94eedbb855cf:39711 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '94eedbb855cf,39711,1732616873796' 2024-11-26T10:27:53,895 DEBUG [RS:1;94eedbb855cf:39711 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-26T10:27:53,896 DEBUG [RS:1;94eedbb855cf:39711 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-26T10:27:53,896 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33740, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-26T10:27:53,896 DEBUG [RS:1;94eedbb855cf:39711 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-26T10:27:53,897 INFO [RS:1;94eedbb855cf:39711 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-26T10:27:53,897 INFO [RS:1;94eedbb855cf:39711 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-26T10:27:53,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44935 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-26T10:27:53,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44935 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-26T10:27:53,897 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44935 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:27:53,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44935 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-26T10:27:53,904 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-26T10:27:53,904 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:53,904 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44935 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-26T10:27:53,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44935 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-26T10:27:53,906 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-26T10:27:53,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46551 is added to blk_1073741835_1011 (size=393) 2024-11-26T10:27:53,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44797 is added to blk_1073741835_1011 (size=393) 2024-11-26T10:27:53,924 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f29613695113f1cf4650e021f3894b68, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf 2024-11-26T10:27:53,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46551 is added to blk_1073741836_1012 (size=76) 2024-11-26T10:27:53,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44797 is added to blk_1073741836_1012 (size=76) 2024-11-26T10:27:53,934 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:27:53,935 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing f29613695113f1cf4650e021f3894b68, disabling compactions & flushes 2024-11-26T10:27:53,935 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68. 2024-11-26T10:27:53,935 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68. 2024-11-26T10:27:53,935 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68. after waiting 0 ms 2024-11-26T10:27:53,935 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68. 2024-11-26T10:27:53,935 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68. 2024-11-26T10:27:53,936 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for f29613695113f1cf4650e021f3894b68: Waiting for close lock at 1732616873935Disabling compacts and flushes for region at 1732616873935Disabling writes for close at 1732616873935Writing region close event to WAL at 1732616873935Closed at 1732616873935 2024-11-26T10:27:53,938 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-26T10:27:53,938 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732616873938"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732616873938"}]},"ts":"1732616873938"} 2024-11-26T10:27:53,943 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-26T10:27:53,945 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-26T10:27:53,945 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732616873945"}]},"ts":"1732616873945"} 2024-11-26T10:27:53,948 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-26T10:27:53,948 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f29613695113f1cf4650e021f3894b68, ASSIGN}] 2024-11-26T10:27:53,950 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f29613695113f1cf4650e021f3894b68, ASSIGN 2024-11-26T10:27:53,951 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f29613695113f1cf4650e021f3894b68, ASSIGN; state=OFFLINE, location=94eedbb855cf,35079,1732616872640; forceNewPlan=false, retain=false 2024-11-26T10:27:53,999 INFO [RS:1;94eedbb855cf:39711 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C39711%2C1732616873796, suffix=, logDir=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796, archiveDir=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/oldWALs, maxLogs=32 2024-11-26T10:27:54,000 INFO [RS:1;94eedbb855cf:39711 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C39711%2C1732616873796.1732616874000 2024-11-26T10:27:54,008 INFO [RS:1;94eedbb855cf:39711 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 2024-11-26T10:27:54,009 DEBUG [RS:1;94eedbb855cf:39711 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39029:39029),(127.0.0.1/127.0.0.1:43123:43123)] 2024-11-26T10:27:54,102 INFO [94eedbb855cf:44935 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-26T10:27:54,103 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f29613695113f1cf4650e021f3894b68, regionState=OPENING, regionLocation=94eedbb855cf,35079,1732616872640 2024-11-26T10:27:54,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f29613695113f1cf4650e021f3894b68, ASSIGN because future has completed 2024-11-26T10:27:54,106 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f29613695113f1cf4650e021f3894b68, server=94eedbb855cf,35079,1732616872640}] 2024-11-26T10:27:54,263 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68. 2024-11-26T10:27:54,263 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => f29613695113f1cf4650e021f3894b68, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:27:54,264 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath f29613695113f1cf4650e021f3894b68 2024-11-26T10:27:54,264 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:27:54,264 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for f29613695113f1cf4650e021f3894b68 2024-11-26T10:27:54,264 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for f29613695113f1cf4650e021f3894b68 2024-11-26T10:27:54,266 INFO [StoreOpener-f29613695113f1cf4650e021f3894b68-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f29613695113f1cf4650e021f3894b68 2024-11-26T10:27:54,267 INFO [StoreOpener-f29613695113f1cf4650e021f3894b68-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f29613695113f1cf4650e021f3894b68 columnFamilyName info 2024-11-26T10:27:54,267 DEBUG [StoreOpener-f29613695113f1cf4650e021f3894b68-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:27:54,268 INFO [StoreOpener-f29613695113f1cf4650e021f3894b68-1 {}] regionserver.HStore(327): Store=f29613695113f1cf4650e021f3894b68/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:27:54,268 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for f29613695113f1cf4650e021f3894b68 2024-11-26T10:27:54,268 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68 2024-11-26T10:27:54,269 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68 2024-11-26T10:27:54,269 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for f29613695113f1cf4650e021f3894b68 2024-11-26T10:27:54,269 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for f29613695113f1cf4650e021f3894b68 2024-11-26T10:27:54,271 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for f29613695113f1cf4650e021f3894b68 2024-11-26T10:27:54,273 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:27:54,274 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened f29613695113f1cf4650e021f3894b68; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=732506, jitterRate=-0.06857168674468994}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-26T10:27:54,274 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f29613695113f1cf4650e021f3894b68 2024-11-26T10:27:54,275 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for f29613695113f1cf4650e021f3894b68: Running coprocessor pre-open hook at 1732616874264Writing region info on filesystem at 1732616874264Initializing all the Stores at 1732616874265 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616874265Cleaning up temporary data from old regions at 1732616874269 (+4 ms)Running coprocessor post-open hooks at 1732616874274 (+5 ms)Region opened successfully at 1732616874275 (+1 ms) 2024-11-26T10:27:54,276 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68., pid=6, masterSystemTime=1732616874259 2024-11-26T10:27:54,279 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68. 2024-11-26T10:27:54,279 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68. 2024-11-26T10:27:54,280 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f29613695113f1cf4650e021f3894b68, regionState=OPEN, openSeqNum=2, regionLocation=94eedbb855cf,35079,1732616872640 2024-11-26T10:27:54,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f29613695113f1cf4650e021f3894b68, server=94eedbb855cf,35079,1732616872640 because future has completed 2024-11-26T10:27:54,289 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-26T10:27:54,289 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure f29613695113f1cf4650e021f3894b68, server=94eedbb855cf,35079,1732616872640 in 178 msec 2024-11-26T10:27:54,291 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-26T10:27:54,291 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f29613695113f1cf4650e021f3894b68, ASSIGN in 341 msec 2024-11-26T10:27:54,292 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-26T10:27:54,293 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732616874292"}]},"ts":"1732616874292"} 2024-11-26T10:27:54,295 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-26T10:27:54,296 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-26T10:27:54,298 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 399 msec 2024-11-26T10:27:59,139 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-26T10:27:59,141 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:27:59,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:27:59,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:27:59,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:27:59,165 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-26T10:28:03,254 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-26T10:28:03,254 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-26T10:28:03,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44935 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-26T10:28:03,965 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-26T10:28:03,965 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-26T10:28:03,968 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-26T10:28:03,968 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68. 2024-11-26T10:28:03,982 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:28:03,986 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:28:03,986 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:28:03,986 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:28:03,986 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-26T10:28:03,987 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@402062d6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:28:03,987 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@219c70cc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:28:04,103 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43e34915{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/java.io.tmpdir/jetty-localhost-38717-hadoop-hdfs-3_4_1-tests_jar-_-any-4222851869393791977/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:04,104 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7d7fa207{HTTP/1.1, (http/1.1)}{localhost:38717} 2024-11-26T10:28:04,104 INFO [Time-limited test {}] server.Server(415): Started @116251ms 2024-11-26T10:28:04,105 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-26T10:28:04,147 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:28:04,152 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:28:04,153 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:28:04,153 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:28:04,153 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-26T10:28:04,155 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c5e4864{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:28:04,155 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4fb99827{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:28:04,216 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data5/current/BP-482080985-172.17.0.2-1732616871772/current, will proceed with Du for space computation calculation, 2024-11-26T10:28:04,216 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data6/current/BP-482080985-172.17.0.2-1732616871772/current, will proceed with Du for space computation calculation, 2024-11-26T10:28:04,233 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-26T10:28:04,236 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe2053d4868825834 with lease ID 0xc3f1d4fea88d042a: Processing first storage report for DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5 from datanode DatanodeRegistration(127.0.0.1:43537, datanodeUuid=7d42f26b-1e2d-4bd4-b876-f1cc6f9bce84, infoPort=34067, infoSecurePort=0, ipcPort=43249, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772) 2024-11-26T10:28:04,236 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe2053d4868825834 with lease ID 0xc3f1d4fea88d042a: from storage DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5 node DatanodeRegistration(127.0.0.1:43537, datanodeUuid=7d42f26b-1e2d-4bd4-b876-f1cc6f9bce84, infoPort=34067, infoSecurePort=0, ipcPort=43249, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-26T10:28:04,236 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe2053d4868825834 with lease ID 0xc3f1d4fea88d042a: Processing first storage report for DS-d1456ba7-b719-4b49-bc34-fcb1c0cd59e5 from datanode DatanodeRegistration(127.0.0.1:43537, datanodeUuid=7d42f26b-1e2d-4bd4-b876-f1cc6f9bce84, infoPort=34067, infoSecurePort=0, ipcPort=43249, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772) 2024-11-26T10:28:04,236 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe2053d4868825834 with lease ID 0xc3f1d4fea88d042a: from storage DS-d1456ba7-b719-4b49-bc34-fcb1c0cd59e5 node DatanodeRegistration(127.0.0.1:43537, datanodeUuid=7d42f26b-1e2d-4bd4-b876-f1cc6f9bce84, infoPort=34067, infoSecurePort=0, ipcPort=43249, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:28:04,286 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@233be953{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/java.io.tmpdir/jetty-localhost-36577-hadoop-hdfs-3_4_1-tests_jar-_-any-10570097240757110015/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:04,287 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@71810790{HTTP/1.1, (http/1.1)}{localhost:36577} 2024-11-26T10:28:04,287 INFO [Time-limited test {}] server.Server(415): Started @116434ms 2024-11-26T10:28:04,288 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-26T10:28:04,322 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:28:04,325 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:28:04,326 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:28:04,326 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:28:04,326 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-26T10:28:04,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25509568{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:28:04,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41559526{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:28:04,384 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data8/current/BP-482080985-172.17.0.2-1732616871772/current, will proceed with Du for space computation calculation, 2024-11-26T10:28:04,384 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data7/current/BP-482080985-172.17.0.2-1732616871772/current, will proceed with Du for space computation calculation, 2024-11-26T10:28:04,406 WARN [Thread-843 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-26T10:28:04,408 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeb08b74d93a82f0e with lease ID 0xc3f1d4fea88d042b: Processing first storage report for DS-69e02dbc-743b-40eb-8060-6a77d32c2103 from datanode DatanodeRegistration(127.0.0.1:45009, datanodeUuid=98e0500c-0229-4fce-9064-0a5963e91032, infoPort=42409, infoSecurePort=0, ipcPort=36321, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772) 2024-11-26T10:28:04,408 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeb08b74d93a82f0e with lease ID 0xc3f1d4fea88d042b: from storage DS-69e02dbc-743b-40eb-8060-6a77d32c2103 node DatanodeRegistration(127.0.0.1:45009, datanodeUuid=98e0500c-0229-4fce-9064-0a5963e91032, infoPort=42409, infoSecurePort=0, ipcPort=36321, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:28:04,408 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeb08b74d93a82f0e with lease ID 0xc3f1d4fea88d042b: Processing first storage report for DS-93301643-06e6-46b3-9c26-9256bd8187cb from datanode DatanodeRegistration(127.0.0.1:45009, datanodeUuid=98e0500c-0229-4fce-9064-0a5963e91032, infoPort=42409, infoSecurePort=0, ipcPort=36321, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772) 2024-11-26T10:28:04,408 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeb08b74d93a82f0e with lease ID 0xc3f1d4fea88d042b: from storage DS-93301643-06e6-46b3-9c26-9256bd8187cb node DatanodeRegistration(127.0.0.1:45009, datanodeUuid=98e0500c-0229-4fce-9064-0a5963e91032, infoPort=42409, infoSecurePort=0, ipcPort=36321, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:28:04,444 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@69de1683{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/java.io.tmpdir/jetty-localhost-42347-hadoop-hdfs-3_4_1-tests_jar-_-any-15581946369989593894/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:04,444 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2801262{HTTP/1.1, (http/1.1)}{localhost:42347} 2024-11-26T10:28:04,444 INFO [Time-limited test {}] server.Server(415): Started @116592ms 2024-11-26T10:28:04,446 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-26T10:28:04,543 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data9/current/BP-482080985-172.17.0.2-1732616871772/current, will proceed with Du for space computation calculation, 2024-11-26T10:28:04,544 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data10/current/BP-482080985-172.17.0.2-1732616871772/current, will proceed with Du for space computation calculation, 2024-11-26T10:28:04,566 WARN [Thread-878 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-26T10:28:04,568 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1bb6667fa8b6f4ed with lease ID 0xc3f1d4fea88d042c: Processing first storage report for DS-95875dab-37fd-430a-a723-37131bdf6cfa from datanode DatanodeRegistration(127.0.0.1:36471, datanodeUuid=aa7af06f-8570-4cf5-9cf5-975e58966074, infoPort=35921, infoSecurePort=0, ipcPort=45383, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772) 2024-11-26T10:28:04,568 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1bb6667fa8b6f4ed with lease ID 0xc3f1d4fea88d042c: from storage DS-95875dab-37fd-430a-a723-37131bdf6cfa node DatanodeRegistration(127.0.0.1:36471, datanodeUuid=aa7af06f-8570-4cf5-9cf5-975e58966074, infoPort=35921, infoSecurePort=0, ipcPort=45383, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:28:04,568 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1bb6667fa8b6f4ed with lease ID 0xc3f1d4fea88d042c: Processing first storage report for DS-09c08efe-5ee0-4772-9528-3af36dd4f878 from datanode DatanodeRegistration(127.0.0.1:36471, datanodeUuid=aa7af06f-8570-4cf5-9cf5-975e58966074, infoPort=35921, infoSecurePort=0, ipcPort=45383, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772) 2024-11-26T10:28:04,568 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1bb6667fa8b6f4ed with lease ID 0xc3f1d4fea88d042c: from storage DS-09c08efe-5ee0-4772-9528-3af36dd4f878 node DatanodeRegistration(127.0.0.1:36471, datanodeUuid=aa7af06f-8570-4cf5-9cf5-975e58966074, infoPort=35921, infoSecurePort=0, ipcPort=45383, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:28:04,667 WARN [ResponseProcessor for block BP-482080985-172.17.0.2-1732616871772:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-482080985-172.17.0.2-1732616871772:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-482080985-172.17.0.2-1732616871772:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
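The ResponseProcessor and DataStreamer failures that begin at 10:28:04,667 above are the expected symptom of the fault this test injects: one of the datanodes serving the WAL write pipelines has been stopped. The fragment below is only a hedged sketch of that kind of fault injection, not the test's actual code; the testUtil instance and the getDFSCluster() accessor are assumptions for illustration.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

final class DatanodeDeathSketch {
  // Stop one datanode under the running mini cluster so in-flight WAL pipelines break.
  static MiniDFSCluster.DataNodeProperties killOneDatanode(HBaseTestingUtil testUtil) {
    MiniDFSCluster dfs = testUtil.getDFSCluster();   // assumed accessor on the test utility
    return dfs.stopDataNode(0);                      // handle kept so the node can be restarted later
  }

  // Bring the same datanode back on its original port once the scenario is done.
  static void revive(HBaseTestingUtil testUtil, MiniDFSCluster.DataNodeProperties dn)
      throws java.io.IOException {
    testUtil.getDFSCluster().restartDataNode(dn, true);
  }
}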
2024-11-26T10:28:04,667 WARN [ResponseProcessor for block BP-482080985-172.17.0.2-1732616871772:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-482080985-172.17.0.2-1732616871772:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:04,667 WARN [ResponseProcessor for block BP-482080985-172.17.0.2-1732616871772:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-482080985-172.17.0.2-1732616871772:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:04,667 WARN [ResponseProcessor for block BP-482080985-172.17.0.2-1732616871772:blk_1073741832_1008 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-482080985-172.17.0.2-1732616871772:blk_1073741832_1008 java.io.IOException: Bad response ERROR for BP-482080985-172.17.0.2-1732616871772:blk_1073741832_1008 from datanode DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:04,667 WARN [DataStreamer for file /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta block BP-482080985-172.17.0.2-1732616871772:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK], DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]) is bad. 2024-11-26T10:28:04,667 WARN [DataStreamer for file /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/WALs/94eedbb855cf,44935,1732616872581/94eedbb855cf%2C44935%2C1732616872581.1732616872741 block BP-482080985-172.17.0.2-1732616871772:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK], DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]) is bad. 
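When the client marks a pipeline node as bad, as in the "Error Recovery ... datanode ... is bad" entries above, whether it tries to swap in a replacement datanode or keeps writing with the survivors is governed by the HDFS client's replace-datanode-on-failure settings. A minimal, illustrative configuration sketch (values chosen only as an example, not taken from this test) looks like this:

import org.apache.hadoop.conf.Configuration;

final class PipelineFailureConfSketch {
  static Configuration pipelineFailureConf() {
    Configuration conf = new Configuration();
    // Enable datanode replacement and use the default heuristic (replace when the
    // surviving pipeline is small relative to the replication factor).
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // If no replacement can be found, continue with the remaining datanodes
    // instead of failing the write outright.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}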
2024-11-26T10:28:04,668 WARN [DataStreamer for file /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616873103 block BP-482080985-172.17.0.2-1732616871772:blk_1073741832_1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741832_1008 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK], DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]) is bad. 2024-11-26T10:28:04,667 WARN [PacketResponder: BP-482080985-172.17.0.2-1732616871772:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46551] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:04,668 WARN [PacketResponder: BP-482080985-172.17.0.2-1732616871772:blk_1073741832_1008, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46551] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:04,668 WARN [DataStreamer for file /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 block BP-482080985-172.17.0.2-1732616871772:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK], DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]) is bad. 2024-11-26T10:28:04,669 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-958811595_22 at /127.0.0.1:59714 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44797:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59714 dst: /127.0.0.1:44797 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:28:04,669 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:49280 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46551:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49280 dst: /127.0.0.1:46551 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:04,669 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1785408445_22 at /127.0.0.1:49308 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:46551:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49308 dst: /127.0.0.1:46551 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:04,669 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:59724 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:44797:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59724 dst: /127.0.0.1:44797 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:28:04,670 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@72e5785b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:04,670 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@68370875{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:28:04,670 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:28:04,670 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-958811595_22 at /127.0.0.1:49260 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46551:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49260 dst: /127.0.0.1:46551 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:28:04,671 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@198c3788{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:28:04,670 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:59738 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44797:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59738 dst: /127.0.0.1:44797 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:04,670 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:49266 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:46551:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49266 dst: /127.0.0.1:46551 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:04,671 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39a69c39{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/hadoop.log.dir/,STOPPED} 2024-11-26T10:28:04,671 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1785408445_22 at /127.0.0.1:59780 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:44797:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59780 dst: /127.0.0.1:44797 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:04,672 WARN [BP-482080985-172.17.0.2-1732616871772 heartbeating to localhost/127.0.0.1:43805 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:28:04,672 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-26T10:28:04,672 WARN [BP-482080985-172.17.0.2-1732616871772 heartbeating to localhost/127.0.0.1:43805 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-482080985-172.17.0.2-1732616871772 (Datanode Uuid 0d9021ae-a61c-4c8b-bb09-5a5ed31a0d11) service to localhost/127.0.0.1:43805 2024-11-26T10:28:04,672 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:28:04,673 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data3/current/BP-482080985-172.17.0.2-1732616871772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:04,673 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data4/current/BP-482080985-172.17.0.2-1732616871772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:04,673 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:28:04,675 WARN [DataStreamer for file /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 block BP-482080985-172.17.0.2-1732616871772:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:04,675 WARN [DataStreamer for file /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/WALs/94eedbb855cf,44935,1732616872581/94eedbb855cf%2C44935%2C1732616872581.1732616872741 block BP-482080985-172.17.0.2-1732616871772:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:04,675 WARN [DataStreamer for file /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta block BP-482080985-172.17.0.2-1732616871772:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:04,675 WARN [DataStreamer for file /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616873103 block BP-482080985-172.17.0.2-1732616871772:blk_1073741832_1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741832_1008 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:04,676 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@475f8022{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:04,676 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f8b6f27{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:28:04,676 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:28:04,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c3d2a60{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:28:04,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bb4f47b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/hadoop.log.dir/,STOPPED} 2024-11-26T10:28:04,681 WARN [BP-482080985-172.17.0.2-1732616871772 heartbeating to localhost/127.0.0.1:43805 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:28:04,681 WARN [BP-482080985-172.17.0.2-1732616871772 heartbeating to localhost/127.0.0.1:43805 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-482080985-172.17.0.2-1732616871772 (Datanode Uuid d8f6f3df-6727-4385-80f7-bf951ac2cfd8) service to localhost/127.0.0.1:43805 2024-11-26T10:28:04,682 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data2/current/BP-482080985-172.17.0.2-1732616871772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:04,682 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data1/current/BP-482080985-172.17.0.2-1732616871772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:04,682 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-26T10:28:04,682 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:28:04,682 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:28:04,686 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68., hostname=94eedbb855cf,35079,1732616872640, seqNum=2] 2024-11-26T10:28:04,687 ERROR [FSHLog-0-hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf-prefix:94eedbb855cf,35079,1732616872640 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:04,687 WARN [FSHLog-0-hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf-prefix:94eedbb855cf,35079,1732616872640 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:04,688 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
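The entries that follow show the region server reacting to the dead pipeline: the log roller requests a new WAL, the old writer fails to close cleanly, and RecoverLeaseFSUtils begins recovering the lease on the abandoned WAL file. Client-side lease recovery of that kind can be sketched as below; the fs handle and WAL path are assumptions, and the real utility uses a smarter retry/backoff than this fixed loop.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class LeaseRecoverySketch {
  // Ask the NameNode to recover the lease on an abandoned WAL and poll until the file is closed.
  static boolean recover(DistributedFileSystem fs, Path wal) throws Exception {
    for (int attempt = 0; attempt < 10; attempt++) {
      if (fs.recoverLease(wal)) {   // true once the file is closed and safe to read
        return true;
      }
      Thread.sleep(1000L);          // illustrative fixed wait between attempts
    }
    return false;
  }
}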
2024-11-26T10:28:04,688 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 94eedbb855cf%2C35079%2C1732616872640:(num 1732616873103) roll requested 2024-11-26T10:28:04,688 INFO [regionserver/94eedbb855cf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C35079%2C1732616872640.1732616884688 2024-11-26T10:28:04,690 WARN [Thread-900 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:04,691 WARN [Thread-900 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK], DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]) is bad. 2024-11-26T10:28:04,691 WARN [Thread-900 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741838_1018 2024-11-26T10:28:04,693 WARN [Thread-900 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK] 2024-11-26T10:28:04,699 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:04,699 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:04,699 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:04,699 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:04,699 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:04,699 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616873103 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616884688 2024-11-26T10:28:04,700 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:04,700 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:04,700 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34067:34067),(127.0.0.1/127.0.0.1:35921:35921)] 2024-11-26T10:28:04,700 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616873103 is not closed yet, will try archiving it next time 2024-11-26T10:28:04,701 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-26T10:28:04,701 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-26T10:28:04,701 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616873103 2024-11-26T10:28:04,703 WARN [IPC Server handler 2 on default port 43805 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616873103 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741832_1008 2024-11-26T10:28:04,707 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616873103 after 4ms 2024-11-26T10:28:04,721 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:05,849 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:06,701 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:06,702 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616884688 2024-11-26T10:28:06,702 WARN [ResponseProcessor for block BP-482080985-172.17.0.2-1732616871772:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-482080985-172.17.0.2-1732616871772:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:06,703 WARN [DataStreamer for file /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616884688 block BP-482080985-172.17.0.2-1732616871772:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK], DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]) is bad. 2024-11-26T10:28:06,703 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42304 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:43537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42304 dst: /127.0.0.1:43537 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:28:06,703 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:47226 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:36471:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47226 dst: /127.0.0.1:36471 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:06,704 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43e34915{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:06,705 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7d7fa207{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:28:06,705 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:28:06,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@219c70cc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:28:06,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@402062d6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/hadoop.log.dir/,STOPPED} 2024-11-26T10:28:06,708 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-26T10:28:06,708 WARN [BP-482080985-172.17.0.2-1732616871772 heartbeating to localhost/127.0.0.1:43805 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:28:06,708 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:28:06,708 WARN [BP-482080985-172.17.0.2-1732616871772 heartbeating to localhost/127.0.0.1:43805 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-482080985-172.17.0.2-1732616871772 (Datanode Uuid 7d42f26b-1e2d-4bd4-b876-f1cc6f9bce84) service to localhost/127.0.0.1:43805 2024-11-26T10:28:06,709 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data5/current/BP-482080985-172.17.0.2-1732616871772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:06,709 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data6/current/BP-482080985-172.17.0.2-1732616871772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:06,709 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:28:06,721 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:07,849 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:08,701 WARN [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]] 2024-11-26T10:28:08,702 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:08,702 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 94eedbb855cf%2C35079%2C1732616872640:(num 1732616884688) roll requested 2024-11-26T10:28:08,702 INFO [regionserver/94eedbb855cf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C35079%2C1732616872640.1732616888702 2024-11-26T10:28:08,707 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46551 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:08,706 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:45796 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data8]'}, localName='127.0.0.1:45009', datanodeUuid='98e0500c-0229-4fce-9064-0a5963e91032', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741840_1022 to mirror 127.0.0.1:46551 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:08,707 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK], DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]) is bad. 2024-11-26T10:28:08,707 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741840_1022 2024-11-26T10:28:08,707 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:45796 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-26T10:28:08,707 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:45796 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:45009:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45796 dst: /127.0.0.1:45009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:08,707 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616873103 after 4006ms 2024-11-26T10:28:08,708 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK] 2024-11-26T10:28:08,709 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:08,709 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK], DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]) is bad. 2024-11-26T10:28:08,709 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741841_1023 2024-11-26T10:28:08,709 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK] 2024-11-26T10:28:08,711 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43537 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:08,711 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:47250 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741842_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data10]'}, localName='127.0.0.1:36471', datanodeUuid='aa7af06f-8570-4cf5-9cf5-975e58966074', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741842_1024 to mirror 127.0.0.1:43537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:08,711 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK], DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]) is bad. 2024-11-26T10:28:08,711 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741842_1024 2024-11-26T10:28:08,711 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:47250 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741842_1024] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-26T10:28:08,712 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:47250 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:36471:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47250 dst: /127.0.0.1:36471 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:28:08,712 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK] 2024-11-26T10:28:08,714 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-26T10:28:08,717 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:08,717 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:08,717 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:08,717 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:08,717 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:08,717 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616884688 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616888702 2024-11-26T10:28:08,718 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35921:35921),(127.0.0.1/127.0.0.1:42409:42409)] 2024-11-26T10:28:08,718 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616873103 is not closed yet, will try archiving it next time 2024-11-26T10:28:08,718 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616884688 is not closed yet, will try archiving it next time 2024-11-26T10:28:08,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36471 is added to blk_1073741839_1021 (size=3600) 2024-11-26T10:28:08,721 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:09,120 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616873103 is not closed yet, will try archiving it next time 2024-11-26T10:28:09,849 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:10,575 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@33867007[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36471, datanodeUuid=aa7af06f-8570-4cf5-9cf5-975e58966074, infoPort=35921, infoSecurePort=0, ipcPort=45383, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772):Failed to transfer BP-482080985-172.17.0.2-1732616871772:blk_1073741839_1021 to 127.0.0.1:46551 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:10,718 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:10,719 WARN [ResponseProcessor for block BP-482080985-172.17.0.2-1732616871772:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-482080985-172.17.0.2-1732616871772:blk_1073741843_1025 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:10,720 WARN [DataStreamer for file /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616888702 block BP-482080985-172.17.0.2-1732616871772:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK], DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]) is bad. 2024-11-26T10:28:10,720 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:47266 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:36471:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47266 dst: /127.0.0.1:36471 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:28:10,720 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:45804 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:45009:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45804 dst: /127.0.0.1:45009 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:10,721 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:10,722 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@69de1683{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:10,722 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2801262{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:28:10,723 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:28:10,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41559526{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:28:10,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25509568{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/hadoop.log.dir/,STOPPED} 2024-11-26T10:28:10,724 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-26T10:28:10,724 WARN [BP-482080985-172.17.0.2-1732616871772 heartbeating to localhost/127.0.0.1:43805 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:28:10,724 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:28:10,724 WARN [BP-482080985-172.17.0.2-1732616871772 heartbeating to localhost/127.0.0.1:43805 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-482080985-172.17.0.2-1732616871772 (Datanode Uuid aa7af06f-8570-4cf5-9cf5-975e58966074) service to localhost/127.0.0.1:43805 2024-11-26T10:28:10,725 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data9/current/BP-482080985-172.17.0.2-1732616871772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:10,725 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data10/current/BP-482080985-172.17.0.2-1732616871772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:10,725 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:28:10,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35079 {}] regionserver.HRegion(8855): Flush requested on f29613695113f1cf4650e021f3894b68 2024-11-26T10:28:10,734 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f29613695113f1cf4650e021f3894b68 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-26T10:28:10,751 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/57c1996f6b5649ecbf5b01cbe39d8b04 is 1080, key is row0002/info:/1732616886711/Put/seqid=0 2024-11-26T10:28:10,753 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:10,753 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK], DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]) is bad. 2024-11-26T10:28:10,753 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741844_1027 2024-11-26T10:28:10,754 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK] 2024-11-26T10:28:10,755 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:10,755 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK], DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]) is bad. 
2024-11-26T10:28:10,755 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741845_1028 2024-11-26T10:28:10,756 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK] 2024-11-26T10:28:10,758 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36471 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:10,758 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42804 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data8]'}, localName='127.0.0.1:45009', datanodeUuid='98e0500c-0229-4fce-9064-0a5963e91032', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741846_1029 to mirror 127.0.0.1:36471 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:10,759 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK], DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]) is bad. 
2024-11-26T10:28:10,759 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741846_1029 2024-11-26T10:28:10,759 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42804 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-26T10:28:10,759 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42804 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:45009:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42804 dst: /127.0.0.1:45009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:10,759 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK] 2024-11-26T10:28:10,760 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:10,760 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK], DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]) is bad. 
2024-11-26T10:28:10,760 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741847_1030 2024-11-26T10:28:10,761 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK] 2024-11-26T10:28:10,761 WARN [IPC Server handler 1 on default port 43805 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-26T10:28:10,762 WARN [IPC Server handler 1 on default port 43805 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-26T10:28:10,762 WARN [IPC Server handler 1 on default port 43805 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-26T10:28:10,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741848_1031 (size=10347) 2024-11-26T10:28:11,166 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/57c1996f6b5649ecbf5b01cbe39d8b04 2024-11-26T10:28:11,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/57c1996f6b5649ecbf5b01cbe39d8b04 as hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/57c1996f6b5649ecbf5b01cbe39d8b04 2024-11-26T10:28:11,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/57c1996f6b5649ecbf5b01cbe39d8b04, entries=5, sequenceid=11, filesize=10.1 K 2024-11-26T10:28:11,180 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for f29613695113f1cf4650e021f3894b68 in 446ms, sequenceid=11, compaction requested=false 2024-11-26T10:28:11,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
f29613695113f1cf4650e021f3894b68: 2024-11-26T10:28:11,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35079 {}] regionserver.HRegion(8855): Flush requested on f29613695113f1cf4650e021f3894b68 2024-11-26T10:28:11,354 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f29613695113f1cf4650e021f3894b68 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-26T10:28:11,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/409df1edc5434b86a534ee0442904dce is 1080, key is row0007/info:/1732616890735/Put/seqid=0 2024-11-26T10:28:11,361 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:11,361 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK], DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]) is bad. 2024-11-26T10:28:11,361 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741849_1032 2024-11-26T10:28:11,362 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK] 2024-11-26T10:28:11,363 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:11,363 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK], DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]) is bad. 2024-11-26T10:28:11,363 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741850_1033 2024-11-26T10:28:11,363 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK] 2024-11-26T10:28:11,366 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44797 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:11,366 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42836 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data8]'}, localName='127.0.0.1:45009', datanodeUuid='98e0500c-0229-4fce-9064-0a5963e91032', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741851_1034 to mirror 127.0.0.1:44797 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:11,366 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK], DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]) is bad. 2024-11-26T10:28:11,366 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741851_1034 2024-11-26T10:28:11,366 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42836 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-26T10:28:11,366 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42836 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:45009:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42836 dst: /127.0.0.1:45009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:11,366 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK] 2024-11-26T10:28:11,368 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43537 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:11,369 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK], DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]) is bad. 2024-11-26T10:28:11,369 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741852_1035 2024-11-26T10:28:11,368 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42840 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741852_1035] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data8]'}, localName='127.0.0.1:45009', datanodeUuid='98e0500c-0229-4fce-9064-0a5963e91032', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741852_1035 to mirror 127.0.0.1:43537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:11,369 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42840 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741852_1035] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-26T10:28:11,369 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42840 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741852_1035] {}] datanode.DataXceiver(331): 127.0.0.1:45009:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42840 dst: /127.0.0.1:45009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:11,369 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK] 2024-11-26T10:28:11,370 WARN [IPC Server handler 1 on default port 43805 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-26T10:28:11,370 WARN [IPC Server handler 1 on default port 43805 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-26T10:28:11,370 WARN [IPC Server handler 1 on default port 43805 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-26T10:28:11,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741853_1036 (size=12506) 2024-11-26T10:28:11,773 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/409df1edc5434b86a534ee0442904dce 2024-11-26T10:28:11,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/409df1edc5434b86a534ee0442904dce as hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/409df1edc5434b86a534ee0442904dce 2024-11-26T10:28:11,786 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/409df1edc5434b86a534ee0442904dce, entries=7, sequenceid=24, filesize=12.2 K 2024-11-26T10:28:11,787 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for f29613695113f1cf4650e021f3894b68 in 433ms, sequenceid=24, compaction requested=false 2024-11-26T10:28:11,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f29613695113f1cf4650e021f3894b68: 2024-11-26T10:28:11,787 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-26T10:28:11,787 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:28:11,787 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/409df1edc5434b86a534ee0442904dce because midkey is the same as first or last row 2024-11-26T10:28:11,850 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:12,719 WARN [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]] 2024-11-26T10:28:12,719 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:12,719 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 94eedbb855cf%2C35079%2C1732616872640:(num 1732616888702) roll requested 2024-11-26T10:28:12,720 INFO [regionserver/94eedbb855cf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C35079%2C1732616872640.1732616892719 2024-11-26T10:28:12,722 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:12,723 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46551 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:12,723 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42850 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data8]'}, localName='127.0.0.1:45009', datanodeUuid='98e0500c-0229-4fce-9064-0a5963e91032', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741854_1037 to mirror 127.0.0.1:46551 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:12,724 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK], DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]) is bad. 2024-11-26T10:28:12,724 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741854_1037 2024-11-26T10:28:12,724 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42850 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-26T10:28:12,724 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42850 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:45009:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42850 dst: /127.0.0.1:45009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:12,724 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK] 2024-11-26T10:28:12,725 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:12,725 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK], DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]) is bad. 2024-11-26T10:28:12,725 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741855_1038 2024-11-26T10:28:12,726 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK] 2024-11-26T10:28:12,727 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:12,727 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK], DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]) is bad. 2024-11-26T10:28:12,727 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741856_1039 2024-11-26T10:28:12,727 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK] 2024-11-26T10:28:12,729 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36471 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:12,729 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42858 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data8]'}, localName='127.0.0.1:45009', datanodeUuid='98e0500c-0229-4fce-9064-0a5963e91032', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741857_1040 to mirror 127.0.0.1:36471 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:12,729 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK], DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]) is bad. 2024-11-26T10:28:12,729 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741857_1040 2024-11-26T10:28:12,729 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42858 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-26T10:28:12,729 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42858 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:45009:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42858 dst: /127.0.0.1:45009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:12,730 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK] 2024-11-26T10:28:12,730 WARN [IPC Server handler 3 on default port 43805 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-26T10:28:12,730 WARN [IPC Server handler 3 on default port 43805 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-26T10:28:12,730 WARN [IPC Server handler 3 on default port 43805 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-26T10:28:12,733 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:12,733 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:12,733 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:12,733 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:12,733 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:12,733 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616888702 with entries=24, filesize=24.23 KB; new WAL /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616892719 2024-11-26T10:28:12,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741843_1026 (size=24823) 2024-11-26T10:28:12,736 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with 
pipeline: [(127.0.0.1/127.0.0.1:42409:42409)] 2024-11-26T10:28:12,736 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616873103 is not closed yet, will try archiving it next time 2024-11-26T10:28:12,736 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616888702 is not closed yet, will try archiving it next time 2024-11-26T10:28:12,738 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616884688 to hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/oldWALs/94eedbb855cf%2C35079%2C1732616872640.1732616884688 2024-11-26T10:28:12,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35079 {}] regionserver.HRegion(8855): Flush requested on f29613695113f1cf4650e021f3894b68 2024-11-26T10:28:12,772 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f29613695113f1cf4650e021f3894b68 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-26T10:28:12,776 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/94f859be0c9d4e85b69f2d82a6354338 is 1079, key is tmprow/info:/1732616892770/Put/seqid=0 2024-11-26T10:28:12,778 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36471 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:12,778 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42886 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data8]'}, localName='127.0.0.1:45009', datanodeUuid='98e0500c-0229-4fce-9064-0a5963e91032', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741859_1042 to mirror 127.0.0.1:36471 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:12,779 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK], DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]) is bad. 2024-11-26T10:28:12,779 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741859_1042 2024-11-26T10:28:12,779 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42886 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-26T10:28:12,779 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42886 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:45009:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42886 dst: /127.0.0.1:45009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:12,779 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK] 2024-11-26T10:28:12,780 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:12,781 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK], DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]) is bad. 2024-11-26T10:28:12,781 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741860_1043 2024-11-26T10:28:12,781 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK] 2024-11-26T10:28:12,783 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46551 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:12,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42888 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data8]'}, localName='127.0.0.1:45009', datanodeUuid='98e0500c-0229-4fce-9064-0a5963e91032', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741861_1044 to mirror 127.0.0.1:46551 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:12,783 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK], DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]) is bad. 2024-11-26T10:28:12,783 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741861_1044 2024-11-26T10:28:12,783 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42888 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-26T10:28:12,784 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42888 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:45009:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42888 dst: /127.0.0.1:45009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:12,784 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK] 2024-11-26T10:28:12,785 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:12,785 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK], DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]) is bad. 
2024-11-26T10:28:12,785 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741862_1045 2024-11-26T10:28:12,785 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK] 2024-11-26T10:28:12,786 WARN [IPC Server handler 4 on default port 43805 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-26T10:28:12,786 WARN [IPC Server handler 4 on default port 43805 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-26T10:28:12,786 WARN [IPC Server handler 4 on default port 43805 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-26T10:28:12,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741863_1046 (size=6027) 2024-11-26T10:28:13,135 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616873103 is not closed yet, will try archiving it next time 2024-11-26T10:28:13,189 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/94f859be0c9d4e85b69f2d82a6354338 2024-11-26T10:28:13,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/94f859be0c9d4e85b69f2d82a6354338 as hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/94f859be0c9d4e85b69f2d82a6354338 2024-11-26T10:28:13,202 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/94f859be0c9d4e85b69f2d82a6354338, entries=1, sequenceid=34, filesize=5.9 K 2024-11-26T10:28:13,203 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): 
Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for f29613695113f1cf4650e021f3894b68 in 432ms, sequenceid=34, compaction requested=true 2024-11-26T10:28:13,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f29613695113f1cf4650e021f3894b68: 2024-11-26T10:28:13,203 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-26T10:28:13,203 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:28:13,203 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/409df1edc5434b86a534ee0442904dce because midkey is the same as first or last row 2024-11-26T10:28:13,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f29613695113f1cf4650e021f3894b68:info, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:28:13,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:28:13,204 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:28:13,205 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:28:13,205 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.HStore(1541): f29613695113f1cf4650e021f3894b68/info is initiating minor compaction (all files) 2024-11-26T10:28:13,205 INFO [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f29613695113f1cf4650e021f3894b68/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68. 
2024-11-26T10:28:13,205 INFO [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/57c1996f6b5649ecbf5b01cbe39d8b04, hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/409df1edc5434b86a534ee0442904dce, hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/94f859be0c9d4e85b69f2d82a6354338] into tmpdir=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp, totalSize=28.2 K 2024-11-26T10:28:13,206 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] compactions.Compactor(225): Compacting 57c1996f6b5649ecbf5b01cbe39d8b04, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732616886711 2024-11-26T10:28:13,206 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] compactions.Compactor(225): Compacting 409df1edc5434b86a534ee0442904dce, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732616890735 2024-11-26T10:28:13,207 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] compactions.Compactor(225): Compacting 94f859be0c9d4e85b69f2d82a6354338, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732616892770 2024-11-26T10:28:13,220 INFO [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f29613695113f1cf4650e021f3894b68#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:28:13,221 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/8e3460fa04d94ee3aac258a150aea2e6 is 1080, key is row0002/info:/1732616886711/Put/seqid=0 2024-11-26T10:28:13,223 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:13,223 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK], DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]) is bad. 2024-11-26T10:28:13,223 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741864_1047 2024-11-26T10:28:13,224 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK] 2024-11-26T10:28:13,225 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:13,225 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK], DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]) is bad. 2024-11-26T10:28:13,225 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741865_1048 2024-11-26T10:28:13,226 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK] 2024-11-26T10:28:13,228 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43537 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:13,228 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42926 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data8]'}, localName='127.0.0.1:45009', datanodeUuid='98e0500c-0229-4fce-9064-0a5963e91032', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741866_1049 to mirror 127.0.0.1:43537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:13,228 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK], DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]) is bad. 2024-11-26T10:28:13,228 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741866_1049 2024-11-26T10:28:13,228 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42926 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-26T10:28:13,228 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42926 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:45009:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42926 dst: /127.0.0.1:45009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:13,229 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK] 2024-11-26T10:28:13,230 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:13,230 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK], DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]) is bad. 
2024-11-26T10:28:13,230 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741867_1050 2024-11-26T10:28:13,230 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK] 2024-11-26T10:28:13,231 WARN [IPC Server handler 1 on default port 43805 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-26T10:28:13,231 WARN [IPC Server handler 1 on default port 43805 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-26T10:28:13,231 WARN [IPC Server handler 1 on default port 43805 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-26T10:28:13,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741868_1051 (size=17994) 2024-11-26T10:28:13,411 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4c84b298[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45009, datanodeUuid=98e0500c-0229-4fce-9064-0a5963e91032, infoPort=42409, infoSecurePort=0, ipcPort=36321, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772):Failed to transfer BP-482080985-172.17.0.2-1732616871772:blk_1073741848_1031 to 127.0.0.1:44797 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:28:13,411 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2405ab2e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45009, datanodeUuid=98e0500c-0229-4fce-9064-0a5963e91032, infoPort=42409, infoSecurePort=0, ipcPort=36321, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772):Failed to transfer BP-482080985-172.17.0.2-1732616871772:blk_1073741853_1036 to 127.0.0.1:36471 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:13,642 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/8e3460fa04d94ee3aac258a150aea2e6 as hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/8e3460fa04d94ee3aac258a150aea2e6 2024-11-26T10:28:13,648 INFO [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f29613695113f1cf4650e021f3894b68/info of f29613695113f1cf4650e021f3894b68 into 8e3460fa04d94ee3aac258a150aea2e6(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:28:13,648 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f29613695113f1cf4650e021f3894b68: 2024-11-26T10:28:13,648 INFO [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68., storeName=f29613695113f1cf4650e021f3894b68/info, priority=13, startTime=1732616893203; duration=0sec 2024-11-26T10:28:13,649 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-26T10:28:13,649 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:28:13,649 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/8e3460fa04d94ee3aac258a150aea2e6 because midkey is the same as first or last row 2024-11-26T10:28:13,649 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-26T10:28:13,649 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:28:13,649 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/8e3460fa04d94ee3aac258a150aea2e6 because midkey is the same as first or last row 2024-11-26T10:28:13,649 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-26T10:28:13,649 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:28:13,649 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/8e3460fa04d94ee3aac258a150aea2e6 because midkey is the same as first or last row 2024-11-26T10:28:13,649 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:28:13,649 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f29613695113f1cf4650e021f3894b68:info 2024-11-26T10:28:13,850 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:14,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35079 {}] regionserver.HRegion(8855): Flush requested on f29613695113f1cf4650e021f3894b68 2024-11-26T10:28:14,189 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f29613695113f1cf4650e021f3894b68 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-26T10:28:14,194 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/59734e622f104e939ceaef5265801d76 is 1079, key is tmprow/info:/1732616894188/Put/seqid=0 2024-11-26T10:28:14,195 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:14,196 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK], DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]) is bad. 2024-11-26T10:28:14,196 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741869_1052 2024-11-26T10:28:14,196 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK] 2024-11-26T10:28:14,197 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:14,198 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK], DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]) is bad. 2024-11-26T10:28:14,198 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741870_1053 2024-11-26T10:28:14,198 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK] 2024-11-26T10:28:14,199 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:14,199 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK], DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]) is bad. 2024-11-26T10:28:14,199 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741871_1054 2024-11-26T10:28:14,200 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK] 2024-11-26T10:28:14,201 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:14,201 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK], DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]) is bad. 2024-11-26T10:28:14,201 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741872_1055 2024-11-26T10:28:14,202 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK] 2024-11-26T10:28:14,202 WARN [IPC Server handler 0 on default port 43805 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-26T10:28:14,202 WARN [IPC Server handler 0 on default port 43805 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-26T10:28:14,202 WARN [IPC Server handler 0 on default port 43805 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-26T10:28:14,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741873_1056 (size=6027) 2024-11-26T10:28:14,411 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4c84b298[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45009, datanodeUuid=98e0500c-0229-4fce-9064-0a5963e91032, infoPort=42409, infoSecurePort=0, ipcPort=36321, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772):Failed to transfer BP-482080985-172.17.0.2-1732616871772:blk_1073741863_1046 to 127.0.0.1:43537 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:14,411 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2405ab2e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45009, datanodeUuid=98e0500c-0229-4fce-9064-0a5963e91032, infoPort=42409, infoSecurePort=0, ipcPort=36321, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772):Failed to transfer BP-482080985-172.17.0.2-1732616871772:blk_1073741843_1026 to 127.0.0.1:46551 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:28:14,606 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/59734e622f104e939ceaef5265801d76 2024-11-26T10:28:14,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/59734e622f104e939ceaef5265801d76 as hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/59734e622f104e939ceaef5265801d76 2024-11-26T10:28:14,618 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/59734e622f104e939ceaef5265801d76, entries=1, sequenceid=45, filesize=5.9 K 2024-11-26T10:28:14,619 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for f29613695113f1cf4650e021f3894b68 in 430ms, sequenceid=45, compaction requested=false 2024-11-26T10:28:14,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f29613695113f1cf4650e021f3894b68: 2024-11-26T10:28:14,619 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-26T10:28:14,619 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:28:14,619 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/8e3460fa04d94ee3aac258a150aea2e6 because midkey is the same as first or last row 2024-11-26T10:28:14,722 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:14,738 WARN [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]] 2024-11-26T10:28:14,738 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:14,738 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 94eedbb855cf%2C35079%2C1732616872640:(num 1732616892719) roll requested 2024-11-26T10:28:14,739 INFO [regionserver/94eedbb855cf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C35079%2C1732616872640.1732616894738 2024-11-26T10:28:14,742 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43537 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:14,742 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42942 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data8]'}, localName='127.0.0.1:45009', datanodeUuid='98e0500c-0229-4fce-9064-0a5963e91032', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741874_1057 to mirror 127.0.0.1:43537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:14,743 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK], DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]) is bad. 2024-11-26T10:28:14,743 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741874_1057 2024-11-26T10:28:14,743 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42942 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-26T10:28:14,743 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42942 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:45009:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42942 dst: /127.0.0.1:45009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:14,743 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK] 2024-11-26T10:28:14,744 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:14,744 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK], DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]) is bad. 2024-11-26T10:28:14,744 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741875_1058 2024-11-26T10:28:14,745 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK] 2024-11-26T10:28:14,747 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46551 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:14,747 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42948 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data8]'}, localName='127.0.0.1:45009', datanodeUuid='98e0500c-0229-4fce-9064-0a5963e91032', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741876_1059 to mirror 127.0.0.1:46551 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:14,747 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK], DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]) is bad. 2024-11-26T10:28:14,747 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741876_1059 2024-11-26T10:28:14,747 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42948 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-26T10:28:14,747 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42948 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:45009:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42948 dst: /127.0.0.1:45009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:14,748 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK] 2024-11-26T10:28:14,750 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44797 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:14,750 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42962 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741877_1060] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data8]'}, localName='127.0.0.1:45009', datanodeUuid='98e0500c-0229-4fce-9064-0a5963e91032', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741877_1060 to mirror 127.0.0.1:44797 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:14,750 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK], DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]) is bad. 2024-11-26T10:28:14,750 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741877_1060 2024-11-26T10:28:14,750 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42962 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741877_1060] {}] datanode.BlockReceiver(316): Block 1073741877 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-26T10:28:14,750 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42962 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741877_1060] {}] datanode.DataXceiver(331): 127.0.0.1:45009:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42962 dst: /127.0.0.1:45009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:14,750 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK] 2024-11-26T10:28:14,751 WARN [IPC Server handler 0 on default port 43805 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-26T10:28:14,751 WARN [IPC Server handler 0 on default port 43805 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-26T10:28:14,751 WARN [IPC Server handler 0 on default port 43805 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-26T10:28:14,753 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:14,753 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:14,753 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:14,753 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:14,754 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:14,754 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616892719 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616894738 2024-11-26T10:28:14,755 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42409:42409)] 
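[Editorial note, not part of the captured test output] The BlockPlacementPolicyDefault warning above says that more detail is available if DEBUG logging is enabled on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology. A minimal sketch of one way to do that programmatically with Log4j 2 (the logging backend this run initializes) is shown below; the class name is illustrative, and the same levels could instead be set declaratively in the properties configuration the test loads.

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public class EnablePlacementDebugLogging {
    public static void main(String[] args) {
        // Raise the two loggers named in the warning to DEBUG so the NameNode logs
        // why it could not place enough replicas for a new block.
        Configurator.setLevel(
            "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
        Configurator.setLevel(
            "org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
    }
}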
2024-11-26T10:28:14,755 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616873103 is not closed yet, will try archiving it next time 2024-11-26T10:28:14,755 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616892719 is not closed yet, will try archiving it next time 2024-11-26T10:28:14,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741858_1041 (size=13591) 2024-11-26T10:28:14,755 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616888702 to hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/oldWALs/94eedbb855cf%2C35079%2C1732616872640.1732616888702 2024-11-26T10:28:15,156 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616873103 is not closed yet, will try archiving it next time 2024-11-26T10:28:15,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35079 {}] regionserver.HRegion(8855): Flush requested on f29613695113f1cf4650e021f3894b68 2024-11-26T10:28:15,607 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f29613695113f1cf4650e021f3894b68 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-26T10:28:15,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/ede3d2a8de534a3e818c602f47d48f3e is 1079, key is tmprow/info:/1732616895605/Put/seqid=0 2024-11-26T10:28:15,613 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:15,613 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK], DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]) is bad. 2024-11-26T10:28:15,614 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741879_1062 2024-11-26T10:28:15,614 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK] 2024-11-26T10:28:15,615 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:15,615 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK], DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]) is bad. 2024-11-26T10:28:15,615 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741880_1063 2024-11-26T10:28:15,616 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK] 2024-11-26T10:28:15,617 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:15,617 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK], DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]) is bad. 2024-11-26T10:28:15,617 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741881_1064 2024-11-26T10:28:15,617 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK] 2024-11-26T10:28:15,619 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36471 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:15,619 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42978 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741882_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data8]'}, localName='127.0.0.1:45009', datanodeUuid='98e0500c-0229-4fce-9064-0a5963e91032', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741882_1065 to mirror 127.0.0.1:36471 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:15,619 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK], DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]) is bad. 2024-11-26T10:28:15,619 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741882_1065 2024-11-26T10:28:15,620 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42978 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741882_1065] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-26T10:28:15,620 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42978 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741882_1065] {}] datanode.DataXceiver(331): 127.0.0.1:45009:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42978 dst: /127.0.0.1:45009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:28:15,620 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK] 2024-11-26T10:28:15,620 WARN [IPC Server handler 3 on default port 43805 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-26T10:28:15,621 WARN [IPC Server handler 3 on default port 43805 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-26T10:28:15,621 WARN [IPC Server handler 3 on default port 43805 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-26T10:28:15,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741883_1066 (size=6027) 2024-11-26T10:28:15,850 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
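[Editorial note, not part of the captured test output] The "All datanodes [...] are bad. Aborting..." entry above (and its later repeats) comes from DataStreamer pipeline recovery failing because no replacement datanode can be found, which is expected here since the test deliberately kills datanodes. As a hedged sketch only: in Hadoop 3.x the client-side behavior on pipeline failure is governed by the dfs.client.block.write.replace-datanode-on-failure.* settings, and a small test cluster will sometimes relax them as below; whether that is appropriate depends on durability requirements, and the class name is illustrative.

import org.apache.hadoop.conf.Configuration;

public class RelaxedPipelineRecoveryConf {
    public static Configuration create() {
        Configuration conf = new Configuration();
        // Keep datanode replacement enabled, but allow best-effort recovery so a write
        // pipeline is not aborted outright when no replacement datanode is available,
        // a common situation when only a couple of datanodes are still alive.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
    }
}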
2024-11-26T10:28:16,024 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/ede3d2a8de534a3e818c602f47d48f3e 2024-11-26T10:28:16,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/ede3d2a8de534a3e818c602f47d48f3e as hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/ede3d2a8de534a3e818c602f47d48f3e 2024-11-26T10:28:16,036 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/ede3d2a8de534a3e818c602f47d48f3e, entries=1, sequenceid=55, filesize=5.9 K 2024-11-26T10:28:16,037 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for f29613695113f1cf4650e021f3894b68 in 430ms, sequenceid=55, compaction requested=true 2024-11-26T10:28:16,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f29613695113f1cf4650e021f3894b68: 2024-11-26T10:28:16,037 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-26T10:28:16,037 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:28:16,037 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/8e3460fa04d94ee3aac258a150aea2e6 because midkey is the same as first or last row 2024-11-26T10:28:16,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f29613695113f1cf4650e021f3894b68:info, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:28:16,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:28:16,037 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:28:16,038 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:28:16,039 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.HStore(1541): f29613695113f1cf4650e021f3894b68/info is initiating minor compaction (all files) 2024-11-26T10:28:16,039 INFO [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
f29613695113f1cf4650e021f3894b68/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68. 2024-11-26T10:28:16,039 INFO [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/8e3460fa04d94ee3aac258a150aea2e6, hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/59734e622f104e939ceaef5265801d76, hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/ede3d2a8de534a3e818c602f47d48f3e] into tmpdir=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp, totalSize=29.3 K 2024-11-26T10:28:16,039 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8e3460fa04d94ee3aac258a150aea2e6, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732616886711 2024-11-26T10:28:16,040 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] compactions.Compactor(225): Compacting 59734e622f104e939ceaef5265801d76, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732616894188 2024-11-26T10:28:16,040 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] compactions.Compactor(225): Compacting ede3d2a8de534a3e818c602f47d48f3e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732616895605 2024-11-26T10:28:16,056 INFO [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f29613695113f1cf4650e021f3894b68#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:28:16,056 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/0ea344a3b88d41d29b89398e744150fd is 1080, key is row0002/info:/1732616886711/Put/seqid=0 2024-11-26T10:28:16,058 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:16,058 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK], DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]) is bad. 2024-11-26T10:28:16,058 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741884_1067 2024-11-26T10:28:16,059 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK] 2024-11-26T10:28:16,060 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:16,060 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK], DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]) is bad. 2024-11-26T10:28:16,060 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741885_1068 2024-11-26T10:28:16,061 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK] 2024-11-26T10:28:16,063 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46551 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:16,062 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42998 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741886_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data8]'}, localName='127.0.0.1:45009', datanodeUuid='98e0500c-0229-4fce-9064-0a5963e91032', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741886_1069 to mirror 127.0.0.1:46551 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:16,063 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK], DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]) is bad. 2024-11-26T10:28:16,063 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741886_1069 2024-11-26T10:28:16,063 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42998 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741886_1069] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-26T10:28:16,063 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:42998 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741886_1069] {}] datanode.DataXceiver(331): 127.0.0.1:45009:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42998 dst: /127.0.0.1:45009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:16,063 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46551,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK] 2024-11-26T10:28:16,065 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36471 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:16,065 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:43002 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741887_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data8]'}, localName='127.0.0.1:45009', datanodeUuid='98e0500c-0229-4fce-9064-0a5963e91032', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741887_1070 to mirror 127.0.0.1:36471 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:28:16,065 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK], DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]) is bad. 2024-11-26T10:28:16,065 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741887_1070 2024-11-26T10:28:16,065 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:43002 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741887_1070] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-26T10:28:16,065 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:43002 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741887_1070] {}] datanode.DataXceiver(331): 127.0.0.1:45009:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43002 dst: /127.0.0.1:45009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:28:16,066 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK] 2024-11-26T10:28:16,066 WARN [IPC Server handler 0 on default port 43805 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-26T10:28:16,066 WARN [IPC Server handler 0 on default port 43805 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-26T10:28:16,066 WARN [IPC Server handler 0 on default port 43805 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-26T10:28:16,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741888_1071 (size=18097) 2024-11-26T10:28:16,078 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/0ea344a3b88d41d29b89398e744150fd as hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/0ea344a3b88d41d29b89398e744150fd 2024-11-26T10:28:16,086 INFO [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f29613695113f1cf4650e021f3894b68/info of f29613695113f1cf4650e021f3894b68 into 0ea344a3b88d41d29b89398e744150fd(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:28:16,086 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f29613695113f1cf4650e021f3894b68: 2024-11-26T10:28:16,086 INFO [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68., storeName=f29613695113f1cf4650e021f3894b68/info, priority=13, startTime=1732616896037; duration=0sec 2024-11-26T10:28:16,086 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-26T10:28:16,086 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:28:16,086 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/0ea344a3b88d41d29b89398e744150fd because midkey is the same as first or last row 2024-11-26T10:28:16,087 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-26T10:28:16,087 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:28:16,087 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/0ea344a3b88d41d29b89398e744150fd because midkey is the same as first or last row 2024-11-26T10:28:16,087 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-26T10:28:16,087 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:28:16,087 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/0ea344a3b88d41d29b89398e744150fd because midkey is the same as first or last row 2024-11-26T10:28:16,087 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:28:16,087 DEBUG [RS:0;94eedbb855cf:35079-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f29613695113f1cf4650e021f3894b68:info 2024-11-26T10:28:16,412 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2405ab2e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45009, datanodeUuid=98e0500c-0229-4fce-9064-0a5963e91032, infoPort=42409, infoSecurePort=0, ipcPort=36321, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772):Failed to transfer 
BP-482080985-172.17.0.2-1732616871772:blk_1073741868_1051 to 127.0.0.1:43537 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:16,412 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4c84b298[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45009, datanodeUuid=98e0500c-0229-4fce-9064-0a5963e91032, infoPort=42409, infoSecurePort=0, ipcPort=36321, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772):Failed to transfer BP-482080985-172.17.0.2-1732616871772:blk_1073741873_1056 to 127.0.0.1:44797 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:16,722 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:16,756 WARN [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-26T10:28:16,756 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:16,828 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:28:16,831 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:28:16,832 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:28:16,832 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:28:16,832 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-26T10:28:16,833 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c1d8e25{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:28:16,833 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13c2f5a4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:28:16,946 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3163184e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/java.io.tmpdir/jetty-localhost-33399-hadoop-hdfs-3_4_1-tests_jar-_-any-6317625295014309192/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:16,946 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@360084d5{HTTP/1.1, (http/1.1)}{localhost:33399} 2024-11-26T10:28:16,946 INFO [Time-limited test {}] server.Server(415): Started @129094ms 2024-11-26T10:28:16,948 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-26T10:28:17,046 WARN [Thread-993 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-26T10:28:17,054 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeaac651b61d2db45 with lease ID 0xc3f1d4fea88d042d: from storage DS-f8481b4f-a002-4859-823b-17ed26ca74f7 node DatanodeRegistration(127.0.0.1:34659, datanodeUuid=0d9021ae-a61c-4c8b-bb09-5a5ed31a0d11, infoPort=33697, infoSecurePort=0, ipcPort=46319, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-26T10:28:17,054 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeaac651b61d2db45 with lease ID 0xc3f1d4fea88d042d: from storage DS-1564ff9f-fbc1-4533-94ec-c95f83bfd18f node DatanodeRegistration(127.0.0.1:34659, datanodeUuid=0d9021ae-a61c-4c8b-bb09-5a5ed31a0d11, infoPort=33697, infoSecurePort=0, ipcPort=46319, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:28:17,412 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4c84b298[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45009, datanodeUuid=98e0500c-0229-4fce-9064-0a5963e91032, infoPort=42409, infoSecurePort=0, ipcPort=36321, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772):Failed to transfer BP-482080985-172.17.0.2-1732616871772:blk_1073741858_1041 to 127.0.0.1:44797 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:17,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34659 is added to blk_1073741883_1066 (size=6027) 2024-11-26T10:28:17,851 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:18,723 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:18,756 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:19,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34659 is added to blk_1073741888_1071 (size=18097) 2024-11-26T10:28:19,851 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:20,723 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:20,756 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:21,851 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:22,560 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-26T10:28:22,723 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:22,757 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:22,849 ERROR [FSHLog-0-hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData-prefix:94eedbb855cf,44935,1732616872581 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:22,849 WARN [FSHLog-0-hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData-prefix:94eedbb855cf,44935,1732616872581 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:22,849 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 94eedbb855cf%2C44935%2C1732616872581:(num 1732616872741) roll requested 2024-11-26T10:28:22,850 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C44935%2C1732616872581.1732616902850 2024-11-26T10:28:22,853 WARN [Thread-1015 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:22,853 WARN [Thread-1015 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK], DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]) is bad. 2024-11-26T10:28:22,853 WARN [Thread-1015 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741889_1072 2024-11-26T10:28:22,853 WARN [Thread-1015 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK] 2024-11-26T10:28:22,857 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:22,857 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:22,858 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:22,858 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:22,858 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:22,858 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/WALs/94eedbb855cf,44935,1732616872581/94eedbb855cf%2C44935%2C1732616872581.1732616872741 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/WALs/94eedbb855cf,44935,1732616872581/94eedbb855cf%2C44935%2C1732616872581.1732616902850 2024-11-26T10:28:22,858 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:22,859 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:22,859 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/WALs/94eedbb855cf,44935,1732616872581/94eedbb855cf%2C44935%2C1732616872581.1732616872741 2024-11-26T10:28:22,859 WARN [IPC Server handler 2 on default port 43805 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/WALs/94eedbb855cf,44935,1732616872581/94eedbb855cf%2C44935%2C1732616872581.1732616872741 has not been closed. Lease recovery is in progress. RecoveryId = 1074 for block blk_1073741830_1006 2024-11-26T10:28:22,859 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42409:42409),(127.0.0.1/127.0.0.1:33697:33697)] 2024-11-26T10:28:22,859 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/WALs/94eedbb855cf,44935,1732616872581/94eedbb855cf%2C44935%2C1732616872581.1732616872741 is not closed yet, will try archiving it next time 2024-11-26T10:28:22,859 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/WALs/94eedbb855cf,44935,1732616872581/94eedbb855cf%2C44935%2C1732616872581.1732616872741 after 0ms 2024-11-26T10:28:23,852 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:24,757 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:25,852 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:26,757 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:26,860 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/WALs/94eedbb855cf,44935,1732616872581/94eedbb855cf%2C44935%2C1732616872581.1732616872741 after 4001ms 2024-11-26T10:28:27,066 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1c05b831 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-482080985-172.17.0.2-1732616871772:blk_1073741832_1008, datanode=DatanodeInfoWithStorage[127.0.0.1:44797,null,null]) java.net.ConnectException: Call From 94eedbb855cf/172.17.0.2 to localhost:44319 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-26T10:28:27,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34659 is added to blk_1073741832_1020 (size=455) 2024-11-26T10:28:27,724 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616873103 to hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/oldWALs/94eedbb855cf%2C35079%2C1732616872640.1732616873103 2024-11-26T10:28:27,725 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616892719 to hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/oldWALs/94eedbb855cf%2C35079%2C1732616872640.1732616892719 2024-11-26T10:28:27,852 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:28,050 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2dd3469e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34659, datanodeUuid=0d9021ae-a61c-4c8b-bb09-5a5ed31a0d11, infoPort=33697, infoSecurePort=0, ipcPort=46319, storageInfo=lv=-57;cid=testClusterID;nsid=163080904;c=1732616871772):Failed to transfer BP-482080985-172.17.0.2-1732616871772:blk_1073741832_1020 to 127.0.0.1:44797 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:28,758 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:29,853 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:30,570 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C35079%2C1732616872640.1732616910569 2024-11-26T10:28:30,573 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:30,573 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK], DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]) is bad. 2024-11-26T10:28:30,573 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741891_1075 2024-11-26T10:28:30,573 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK] 2024-11-26T10:28:30,575 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36471 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:30,575 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-958811595_22 at /127.0.0.1:56340 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741892_1076] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data4]'}, localName='127.0.0.1:34659', datanodeUuid='0d9021ae-a61c-4c8b-bb09-5a5ed31a0d11', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741892_1076 to mirror 127.0.0.1:36471 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:30,576 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34659,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK], DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]) is bad. 2024-11-26T10:28:30,576 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741892_1076 2024-11-26T10:28:30,576 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-958811595_22 at /127.0.0.1:56340 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741892_1076] {}] datanode.BlockReceiver(316): Block 1073741892 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-26T10:28:30,576 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-958811595_22 at /127.0.0.1:56340 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741892_1076] {}] datanode.DataXceiver(331): 127.0.0.1:34659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56340 dst: /127.0.0.1:34659 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:30,576 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK] 2024-11-26T10:28:30,577 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:30,577 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK], DatanodeInfoWithStorage[127.0.0.1:34659,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]) is bad. 
2024-11-26T10:28:30,578 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741893_1077 2024-11-26T10:28:30,578 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK] 2024-11-26T10:28:30,582 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:30,582 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:30,582 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:30,582 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:30,582 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:30,582 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616894738 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616910569 2024-11-26T10:28:30,583 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33697:33697),(127.0.0.1/127.0.0.1:42409:42409)] 2024-11-26T10:28:30,583 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616894738 is not closed yet, will try archiving it next time 2024-11-26T10:28:30,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741878_1061 (size=12911) 2024-11-26T10:28:30,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35079 {}] regionserver.HRegion(8855): Flush requested on f29613695113f1cf4650e021f3894b68 2024-11-26T10:28:30,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f29613695113f1cf4650e021f3894b68 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-26T10:28:30,593 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/92b1a3862bec4c92a7952e822b776b8b is 1080, key is row0013/info:/1732616910585/Put/seqid=0 2024-11-26T10:28:30,594 WARN [Thread-1032 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:30,594 WARN [Thread-1032 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741895_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK], DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]) is bad. 2024-11-26T10:28:30,594 WARN [Thread-1032 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741895_1079 2024-11-26T10:28:30,595 WARN [Thread-1032 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK] 2024-11-26T10:28:30,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34659 is added to blk_1073741896_1080 (size=8190) 2024-11-26T10:28:30,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741896_1080 (size=8190) 2024-11-26T10:28:30,606 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/92b1a3862bec4c92a7952e822b776b8b 2024-11-26T10:28:30,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/92b1a3862bec4c92a7952e822b776b8b as hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/92b1a3862bec4c92a7952e822b776b8b 2024-11-26T10:28:30,619 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/92b1a3862bec4c92a7952e822b776b8b, entries=3, sequenceid=66, filesize=8.0 K 2024-11-26T10:28:30,620 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for f29613695113f1cf4650e021f3894b68 in 32ms, sequenceid=66, compaction requested=false 2024-11-26T10:28:30,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f29613695113f1cf4650e021f3894b68: 2024-11-26T10:28:30,620 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-26T10:28:30,620 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:28:30,620 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/0ea344a3b88d41d29b89398e744150fd because midkey is the same as first or last row 
2024-11-26T10:28:30,758 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-26T10:28:30,758 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:30,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-26T10:28:30,805 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-26T10:28:30,805 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:28:30,805 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:28:30,806 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:28:30,806 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-26T10:28:30,806 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-26T10:28:30,806 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=767977449, stopped=false 2024-11-26T10:28:30,806 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=94eedbb855cf,44935,1732616872581 2024-11-26T10:28:30,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-26T10:28:30,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39711-0x10153d14d020002, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-26T10:28:30,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-26T10:28:30,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39711-0x10153d14d020002, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:28:30,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:28:30,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:28:30,808 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-26T10:28:30,808 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-26T10:28:30,808 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:28:30,808 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:28:30,808 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:39711-0x10153d14d020002, quorum=127.0.0.1:58673, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:28:30,808 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:28:30,809 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '94eedbb855cf,35079,1732616872640' ***** 2024-11-26T10:28:30,809 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-26T10:28:30,809 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '94eedbb855cf,39711,1732616873796' ***** 2024-11-26T10:28:30,809 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-26T10:28:30,809 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-26T10:28:30,809 INFO [RS:1;94eedbb855cf:39711 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-26T10:28:30,809 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-26T10:28:30,809 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-26T10:28:30,809 INFO [RS:0;94eedbb855cf:35079 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-26T10:28:30,809 INFO [RS:0;94eedbb855cf:35079 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-26T10:28:30,809 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.HRegionServer(3091): Received CLOSE for f29613695113f1cf4650e021f3894b68 2024-11-26T10:28:30,809 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:28:30,809 INFO [RS:1;94eedbb855cf:39711 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-26T10:28:30,810 INFO [RS:1;94eedbb855cf:39711 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-26T10:28:30,810 INFO [RS:1;94eedbb855cf:39711 {}] regionserver.HRegionServer(959): stopping server 94eedbb855cf,39711,1732616873796 2024-11-26T10:28:30,810 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.HRegionServer(959): stopping server 94eedbb855cf,35079,1732616872640 2024-11-26T10:28:30,810 INFO [RS:1;94eedbb855cf:39711 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-26T10:28:30,810 INFO [RS:0;94eedbb855cf:35079 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-26T10:28:30,810 INFO [RS:1;94eedbb855cf:39711 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;94eedbb855cf:39711. 2024-11-26T10:28:30,810 INFO [RS:0;94eedbb855cf:35079 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;94eedbb855cf:35079. 
2024-11-26T10:28:30,810 DEBUG [RS:1;94eedbb855cf:39711 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:28:30,810 DEBUG [RS:0;94eedbb855cf:35079 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:28:30,810 DEBUG [RS:1;94eedbb855cf:39711 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:28:30,810 DEBUG [RS:0;94eedbb855cf:35079 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:28:30,810 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f29613695113f1cf4650e021f3894b68, disabling compactions & flushes 2024-11-26T10:28:30,810 INFO [RS:1;94eedbb855cf:39711 {}] regionserver.HRegionServer(976): stopping server 94eedbb855cf,39711,1732616873796; all regions closed. 2024-11-26T10:28:30,810 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-26T10:28:30,810 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68. 2024-11-26T10:28:30,810 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-26T10:28:30,810 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-26T10:28:30,810 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68. 2024-11-26T10:28:30,810 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68. after waiting 0 ms 2024-11-26T10:28:30,810 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-26T10:28:30,810 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68. 2024-11-26T10:28:30,810 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing f29613695113f1cf4650e021f3894b68 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-26T10:28:30,811 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-26T10:28:30,811 DEBUG [RS:0;94eedbb855cf:35079 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, f29613695113f1cf4650e021f3894b68=TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68.} 2024-11-26T10:28:30,811 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:30,811 DEBUG [RS:0;94eedbb855cf:35079 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, f29613695113f1cf4650e021f3894b68 2024-11-26T10:28:30,811 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-26T10:28:30,811 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:30,811 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-26T10:28:30,811 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-26T10:28:30,811 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-26T10:28:30,811 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-26T10:28:30,811 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:30,811 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column 
families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-26T10:28:30,811 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:30,811 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:30,811 ERROR [FSHLog-0-hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf-prefix:94eedbb855cf,35079,1732616872640.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:30,812 WARN [FSHLog-0-hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf-prefix:94eedbb855cf,35079,1732616872640.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:30,812 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:30,812 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 94eedbb855cf%2C35079%2C1732616872640.meta:.meta(num 1732616873623) roll requested 2024-11-26T10:28:30,812 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:30,812 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 2024-11-26T10:28:30,812 INFO [regionserver/94eedbb855cf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C35079%2C1732616872640.meta.1732616910812.meta 2024-11-26T10:28:30,813 WARN [IPC Server handler 4 on default port 43805 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 has not been closed. Lease recovery is in progress. RecoveryId = 1081 for block blk_1073741837_1013 2024-11-26T10:28:30,813 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 after 1ms 2024-11-26T10:28:30,815 WARN [Thread-1040 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1082 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:30,815 WARN [Thread-1040 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741897_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK], DatanodeInfoWithStorage[127.0.0.1:34659,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]) is bad. 
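RecoverLeaseFSUtils reports "Failed to recover lease, attempt=0 ... after 1ms" because the NameNode answers that lease recovery on the old WAL file is still in progress (RecoveryId = 1081); HBase simply keeps retrying until the file is closed. The same check can be made directly against HDFS with `DistributedFileSystem.recoverLease`, which returns true once the lease is released. A minimal sketch; the retry budget and sleep are arbitrary choices, not values from this test.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  /** Retries recoverLease() until the NameNode reports the file as closed. */
  public static boolean recover(Configuration conf, String walUri) throws Exception {
    Path wal = new Path(walUri);
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new URI(walUri), conf);
    for (int attempt = 0; attempt < 30; attempt++) { // retry budget is an assumption
      if (dfs.recoverLease(wal)) {                   // true => lease released, file closed
        return true;
      }
      Thread.sleep(1000L);                           // NameNode-side recovery is asynchronous
    }
    return false;
  }
}
```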
2024-11-26T10:28:30,815 WARN [Thread-1040 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741897_1082 2024-11-26T10:28:30,816 WARN [Thread-1040 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK] 2024-11-26T10:28:30,818 WARN [Thread-1040 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1083 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43537 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:30,818 WARN [Thread-1040 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741898_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34659,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK], DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]) is bad. 2024-11-26T10:28:30,818 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:57438 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741898_1083] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data4]'}, localName='127.0.0.1:34659', datanodeUuid='0d9021ae-a61c-4c8b-bb09-5a5ed31a0d11', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741898_1083 to mirror 127.0.0.1:43537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
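The repeated "Exception in createBlockOutputStream ... Connection refused" lines mean the client, and the mirroring datanode, are still trying to open data-transfer connections to datanodes the test has already killed; each failed attempt abandons the block and excludes that node from the pipeline. When debugging such a run, a plain socket probe is enough to confirm that a datanode transfer port is really down. A minimal, self-contained sketch; the ports are the ones reported as bad above.

```java
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

public final class DatanodePortProbe {
  /** Returns true if something accepts TCP connections on host:port within timeoutMs. */
  public static boolean isOpen(String host, int port, int timeoutMs) {
    try (Socket socket = new Socket()) {
      socket.connect(new InetSocketAddress(host, port), timeoutMs);
      return true;
    } catch (IOException e) {
      // "Connection refused" here matches the ConnectException in the pipeline errors above.
      return false;
    }
  }

  public static void main(String[] args) {
    System.out.println("127.0.0.1:44797 open? " + isOpen("127.0.0.1", 44797, 1000));
    System.out.println("127.0.0.1:43537 open? " + isOpen("127.0.0.1", 43537, 1000));
  }
}
```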
2024-11-26T10:28:30,818 WARN [Thread-1040 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741898_1083 2024-11-26T10:28:30,818 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:57438 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741898_1083] {}] datanode.BlockReceiver(316): Block 1073741898 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-26T10:28:30,818 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:57438 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741898_1083] {}] datanode.DataXceiver(331): 127.0.0.1:34659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57438 dst: /127.0.0.1:34659 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:30,818 WARN [Thread-1040 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK] 2024-11-26T10:28:30,819 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/55bf3ccb4c294548807d2f0fd8a7510a is 1080, key is row0015/info:/1732616910589/Put/seqid=0 2024-11-26T10:28:30,820 WARN [Thread-1040 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1084 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:30,820 WARN [Thread-1040 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741899_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK], DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]) is bad. 2024-11-26T10:28:30,820 WARN [Thread-1040 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741899_1084 2024-11-26T10:28:30,820 WARN [Thread-1040 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK] 2024-11-26T10:28:30,820 WARN [Thread-1041 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1085 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:30,820 WARN [Thread-1041 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741900_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK], DatanodeInfoWithStorage[127.0.0.1:34659,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]) is bad. 2024-11-26T10:28:30,820 WARN [Thread-1041 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741900_1085 2024-11-26T10:28:30,821 WARN [Thread-1041 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK] 2024-11-26T10:28:30,822 WARN [Thread-1041 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:30,822 WARN [Thread-1041 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741902_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK], DatanodeInfoWithStorage[127.0.0.1:34659,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]) is bad. 2024-11-26T10:28:30,822 WARN [Thread-1041 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741902_1087 2024-11-26T10:28:30,823 WARN [Thread-1041 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK] 2024-11-26T10:28:30,824 WARN [Thread-1041 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1088 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:30,824 WARN [Thread-1041 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741903_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK], DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]) is bad. 
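The "All datanodes [...] are bad. Aborting..." appends fail because pipeline recovery has excluded every remaining replica. In small clusters this behaviour is usually tuned through the standard HDFS client replace-datanode-on-failure settings; the keys below are the stock HDFS properties, but the values shown are only an example and are not what this test configures.

```java
import org.apache.hadoop.conf.Configuration;

public final class PipelineRecoveryConfSketch {
  public static Configuration relaxedPipelineRecovery() {
    Configuration conf = new Configuration();
    // Standard HDFS client keys controlling datanode replacement on write failure.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // Best-effort keeps writing with fewer replicas instead of aborting the stream.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}
```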
2024-11-26T10:28:30,824 WARN [Thread-1041 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741903_1088 2024-11-26T10:28:30,824 WARN [Thread-1041 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK] 2024-11-26T10:28:30,824 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:30,824 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:30,825 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:30,825 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:30,825 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:30,825 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616910812.meta 2024-11-26T10:28:30,826 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:30,826 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:30,826 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta 2024-11-26T10:28:30,827 WARN [IPC Server handler 0 on default port 43805 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta has not been closed. 
Lease recovery is in progress. RecoveryId = 1090 for block blk_1073741834_1010 2024-11-26T10:28:30,827 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta after 1ms 2024-11-26T10:28:30,833 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33697:33697),(127.0.0.1/127.0.0.1:42409:42409)] 2024-11-26T10:28:30,833 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta is not closed yet, will try archiving it next time 2024-11-26T10:28:30,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741904_1089 (size=14660) 2024-11-26T10:28:30,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34659 is added to blk_1073741904_1089 (size=14660) 2024-11-26T10:28:30,835 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/55bf3ccb4c294548807d2f0fd8a7510a 2024-11-26T10:28:30,842 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/.tmp/info/55bf3ccb4c294548807d2f0fd8a7510a as hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/55bf3ccb4c294548807d2f0fd8a7510a 2024-11-26T10:28:30,848 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/55bf3ccb4c294548807d2f0fd8a7510a, entries=9, sequenceid=78, filesize=14.3 K 2024-11-26T10:28:30,850 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for f29613695113f1cf4650e021f3894b68 in 40ms, sequenceid=78, compaction requested=true 2024-11-26T10:28:30,850 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/57c1996f6b5649ecbf5b01cbe39d8b04, 
hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/409df1edc5434b86a534ee0442904dce, hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/8e3460fa04d94ee3aac258a150aea2e6, hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/94f859be0c9d4e85b69f2d82a6354338, hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/59734e622f104e939ceaef5265801d76, hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/ede3d2a8de534a3e818c602f47d48f3e] to archive 2024-11-26T10:28:30,852 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-26T10:28:30,854 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740/.tmp/info/2898726312e64842ac706a1976b289c1 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68./info:regioninfo/1732616874280/Put/seqid=0 2024-11-26T10:28:30,854 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/57c1996f6b5649ecbf5b01cbe39d8b04 to hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/57c1996f6b5649ecbf5b01cbe39d8b04 2024-11-26T10:28:30,856 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/409df1edc5434b86a534ee0442904dce to hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/409df1edc5434b86a534ee0442904dce 2024-11-26T10:28:30,857 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/8e3460fa04d94ee3aac258a150aea2e6 to 
hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/8e3460fa04d94ee3aac258a150aea2e6 2024-11-26T10:28:30,859 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/94f859be0c9d4e85b69f2d82a6354338 to hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/94f859be0c9d4e85b69f2d82a6354338 2024-11-26T10:28:30,860 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/59734e622f104e939ceaef5265801d76 to hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/59734e622f104e939ceaef5265801d76 2024-11-26T10:28:30,861 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/ede3d2a8de534a3e818c602f47d48f3e to hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/info/ede3d2a8de534a3e818c602f47d48f3e 2024-11-26T10:28:30,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34659 is added to blk_1073741905_1091 (size=7089) 2024-11-26T10:28:30,862 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=94eedbb855cf:44935 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-26T10:28:30,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741905_1091 (size=7089) 2024-11-26T10:28:30,862 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [57c1996f6b5649ecbf5b01cbe39d8b04=10347, 409df1edc5434b86a534ee0442904dce=12506, 8e3460fa04d94ee3aac258a150aea2e6=17994, 94f859be0c9d4e85b69f2d82a6354338=6027, 59734e622f104e939ceaef5265801d76=6027, ede3d2a8de534a3e818c602f47d48f3e=6027] 2024-11-26T10:28:30,863 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740/.tmp/info/2898726312e64842ac706a1976b289c1 2024-11-26T10:28:30,868 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f29613695113f1cf4650e021f3894b68/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-26T10:28:30,868 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68. 2024-11-26T10:28:30,869 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f29613695113f1cf4650e021f3894b68: Waiting for close lock at 1732616910810Running coprocessor pre-close hooks at 1732616910810Disabling compacts and flushes for region at 1732616910810Disabling writes for close at 1732616910810Obtaining lock to block concurrent updates at 1732616910810Preparing flush snapshotting stores in f29613695113f1cf4650e021f3894b68 at 1732616910810Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1732616910811 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68. at 1732616910812 (+1 ms)Flushing f29613695113f1cf4650e021f3894b68/info: creating writer at 1732616910813 (+1 ms)Flushing f29613695113f1cf4650e021f3894b68/info: appending metadata at 1732616910818 (+5 ms)Flushing f29613695113f1cf4650e021f3894b68/info: closing flushed file at 1732616910818Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@59159e8: reopening flushed file at 1732616910841 (+23 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for f29613695113f1cf4650e021f3894b68 in 40ms, sequenceid=78, compaction requested=true at 1732616910850 (+9 ms)Writing region close event to WAL at 1732616910863 (+13 ms)Running coprocessor post-close hooks at 1732616910868 (+5 ms)Closed at 1732616910868 2024-11-26T10:28:30,869 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732616873897.f29613695113f1cf4650e021f3894b68. 
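The region close journal above walks through the whole close-time flush: snapshot the memstore, write the HFile under .tmp, reopen the flushed file, write the close marker. Outside of a close, the same memstore-to-HFile flush can be requested explicitly through the Admin API. A minimal sketch; the table name is the one under test, and the Connection is assumed to be supplied by the caller.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class ExplicitFlushSketch {
  /** Asks the region servers to flush the table's memstores into HFiles. */
  public static void flush(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
    }
  }
}
```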
2024-11-26T10:28:30,884 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740/.tmp/ns/9c4aa932de8e45e6be62a63a9a003b3a is 43, key is default/ns:d/1732616873672/Put/seqid=0 2024-11-26T10:28:30,886 WARN [Thread-1060 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741906_1092 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:30,886 WARN [Thread-1060 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741906_1092 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK], DatanodeInfoWithStorage[127.0.0.1:34659,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]) is bad. 2024-11-26T10:28:30,886 WARN [Thread-1060 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741906_1092 2024-11-26T10:28:30,886 WARN [Thread-1060 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK] 2024-11-26T10:28:30,887 WARN [Thread-1060 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741907_1093 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:30,887 WARN [Thread-1060 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741907_1093 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK], DatanodeInfoWithStorage[127.0.0.1:34659,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]) is bad. 2024-11-26T10:28:30,887 WARN [Thread-1060 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741907_1093 2024-11-26T10:28:30,888 WARN [Thread-1060 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK] 2024-11-26T10:28:30,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741908_1094 (size=5153) 2024-11-26T10:28:30,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34659 is added to blk_1073741908_1094 (size=5153) 2024-11-26T10:28:30,893 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740/.tmp/ns/9c4aa932de8e45e6be62a63a9a003b3a 2024-11-26T10:28:30,915 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740/.tmp/table/e640f6f121f64cd49f1d03bbb4a6e974 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732616874292/Put/seqid=0 2024-11-26T10:28:30,917 WARN [Thread-1066 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741909_1095 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:30,917 WARN [Thread-1066 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741909_1095 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK], DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK]) is bad. 
2024-11-26T10:28:30,918 WARN [Thread-1066 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741909_1095 2024-11-26T10:28:30,918 WARN [Thread-1066 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43537,DS-88e036a2-3e8f-4d8e-9db0-6dc78d73c8c5,DISK] 2024-11-26T10:28:30,918 INFO [regionserver/94eedbb855cf:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-26T10:28:30,919 INFO [regionserver/94eedbb855cf:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-26T10:28:30,920 WARN [Thread-1066 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741910_1096 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36471 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:30,920 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:53940 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741910_1096] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data8]'}, localName='127.0.0.1:45009', datanodeUuid='98e0500c-0229-4fce-9064-0a5963e91032', xmitsInProgress=0}:Exception transferring block BP-482080985-172.17.0.2-1732616871772:blk_1073741910_1096 to mirror 127.0.0.1:36471 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:28:30,921 WARN [Thread-1066 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741910_1096 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45009,DS-69e02dbc-743b-40eb-8060-6a77d32c2103,DISK], DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK]) is bad. 2024-11-26T10:28:30,921 WARN [Thread-1066 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741910_1096 2024-11-26T10:28:30,921 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:53940 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741910_1096] {}] datanode.BlockReceiver(316): Block 1073741910 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-26T10:28:30,921 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_243660665_22 at /127.0.0.1:53940 [Receiving block BP-482080985-172.17.0.2-1732616871772:blk_1073741910_1096] {}] datanode.DataXceiver(331): 127.0.0.1:45009:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53940 dst: /127.0.0.1:45009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:30,921 WARN [Thread-1066 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36471,DS-95875dab-37fd-430a-a723-37131bdf6cfa,DISK] 2024-11-26T10:28:30,922 WARN [Thread-1066 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741911_1097 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:30,923 WARN [Thread-1066 {}] hdfs.DataStreamer(1731): Error Recovery for BP-482080985-172.17.0.2-1732616871772:blk_1073741911_1097 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK], DatanodeInfoWithStorage[127.0.0.1:34659,DS-f8481b4f-a002-4859-823b-17ed26ca74f7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK]) is bad. 2024-11-26T10:28:30,923 WARN [Thread-1066 {}] hdfs.DataStreamer(1850): Abandoning BP-482080985-172.17.0.2-1732616871772:blk_1073741911_1097 2024-11-26T10:28:30,923 WARN [Thread-1066 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44797,DS-51c1a3b7-694b-4b53-9210-26d4deba4708,DISK] 2024-11-26T10:28:30,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741912_1098 (size=5424) 2024-11-26T10:28:30,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34659 is added to blk_1073741912_1098 (size=5424) 2024-11-26T10:28:30,929 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740/.tmp/table/e640f6f121f64cd49f1d03bbb4a6e974 2024-11-26T10:28:30,934 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740/.tmp/info/2898726312e64842ac706a1976b289c1 as hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740/info/2898726312e64842ac706a1976b289c1 2024-11-26T10:28:30,940 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740/info/2898726312e64842ac706a1976b289c1, entries=10, sequenceid=11, filesize=6.9 K 2024-11-26T10:28:30,941 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740/.tmp/ns/9c4aa932de8e45e6be62a63a9a003b3a as hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740/ns/9c4aa932de8e45e6be62a63a9a003b3a 2024-11-26T10:28:30,946 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740/ns/9c4aa932de8e45e6be62a63a9a003b3a, entries=2, sequenceid=11, filesize=5.0 K 2024-11-26T10:28:30,947 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740/.tmp/table/e640f6f121f64cd49f1d03bbb4a6e974 as hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740/table/e640f6f121f64cd49f1d03bbb4a6e974 
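The HRegionFileSystem(442) "Committing ... .tmp/... as .../info/..." entries are the publish step of a flush: the HFile is first written under the region's .tmp directory and then moved into the column family directory. A simplified sketch of that rename-based commit; the real HRegionFileSystem also validates the file and handles rename failures, so this is only an illustration of the pattern.

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class CommitStoreFileSketch {
  /** Moves a flushed HFile from the region's .tmp dir into the family dir. */
  public static Path commit(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
    Path dst = new Path(familyDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dst)) { // the HDFS rename is the publish step
      throw new IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }
}
```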
2024-11-26T10:28:30,951 INFO [regionserver/94eedbb855cf:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-26T10:28:30,952 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740/table/e640f6f121f64cd49f1d03bbb4a6e974, entries=2, sequenceid=11, filesize=5.3 K 2024-11-26T10:28:30,953 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 142ms, sequenceid=11, compaction requested=false 2024-11-26T10:28:30,958 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-26T10:28:30,958 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-26T10:28:30,959 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-26T10:28:30,959 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732616910811Running coprocessor pre-close hooks at 1732616910811Disabling compacts and flushes for region at 1732616910811Disabling writes for close at 1732616910811Obtaining lock to block concurrent updates at 1732616910811Preparing flush snapshotting stores in 1588230740 at 1732616910811Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732616910811Flushing stores of hbase:meta,,1.1588230740 at 1732616910833 (+22 ms)Flushing 1588230740/info: creating writer at 1732616910833Flushing 1588230740/info: appending metadata at 1732616910853 (+20 ms)Flushing 1588230740/info: closing flushed file at 1732616910853Flushing 1588230740/ns: creating writer at 1732616910869 (+16 ms)Flushing 1588230740/ns: appending metadata at 1732616910883 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732616910883Flushing 1588230740/table: creating writer at 1732616910899 (+16 ms)Flushing 1588230740/table: appending metadata at 1732616910915 (+16 ms)Flushing 1588230740/table: closing flushed file at 1732616910915Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@599d0c1c: reopening flushed file at 1732616910934 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@29d23b91: reopening flushed file at 1732616910940 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2aed9819: reopening flushed file at 1732616910946 (+6 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 142ms, sequenceid=11, compaction requested=false at 1732616910953 (+7 ms)Writing region close event to WAL at 1732616910954 (+1 ms)Running coprocessor post-close hooks at 1732616910958 (+4 ms)Closed at 1732616910958 2024-11-26T10:28:30,959 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 
{event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-26T10:28:30,973 INFO [regionserver/94eedbb855cf:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-26T10:28:30,973 INFO [regionserver/94eedbb855cf:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-26T10:28:30,985 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.1732616894738 to hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/oldWALs/94eedbb855cf%2C35079%2C1732616872640.1732616894738 2024-11-26T10:28:31,011 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.HRegionServer(976): stopping server 94eedbb855cf,35079,1732616872640; all regions closed. 2024-11-26T10:28:31,011 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:31,012 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:31,012 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:31,012 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:31,012 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:31,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741901_1086 (size=825) 2024-11-26T10:28:31,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34659 is added to blk_1073741901_1086 (size=825) 2024-11-26T10:28:31,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34659 is added to blk_1073741878_1061 (size=12911) 2024-11-26T10:28:31,850 INFO [regionserver/94eedbb855cf:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-26T10:28:33,254 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-26T10:28:33,255 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-26T10:28:33,255 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-26T10:28:34,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741833_1009 (size=32) 2024-11-26T10:28:34,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741835_1011 (size=393) 2024-11-26T10:28:34,570 INFO [master/94eedbb855cf:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-26T10:28:34,570 INFO [master/94eedbb855cf:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-26T10:28:34,814 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 after 4002ms 2024-11-26T10:28:34,828 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta after 4002ms 2024-11-26T10:28:35,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741829_1005 (size=34) 2024-11-26T10:28:35,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741831_1007 (size=1321) 2024-11-26T10:28:35,812 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-26T10:28:35,814 DEBUG [RS:1;94eedbb855cf:39711 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/oldWALs 2024-11-26T10:28:35,814 INFO [RS:1;94eedbb855cf:39711 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 94eedbb855cf%2C39711%2C1732616873796:(num 1732616874000) 2024-11-26T10:28:35,814 DEBUG [RS:1;94eedbb855cf:39711 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:28:35,814 INFO [RS:1;94eedbb855cf:39711 {}] regionserver.LeaseManager(133): Closed leases 2024-11-26T10:28:35,814 INFO [RS:1;94eedbb855cf:39711 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-26T10:28:35,814 INFO [RS:1;94eedbb855cf:39711 {}] hbase.ChoreService(370): Chore service for: regionserver/94eedbb855cf:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-26T10:28:35,815 INFO [RS:1;94eedbb855cf:39711 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-26T10:28:35,815 INFO [RS:1;94eedbb855cf:39711 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-26T10:28:35,815 INFO [RS:1;94eedbb855cf:39711 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-26T10:28:35,815 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-26T10:28:35,815 INFO [RS:1;94eedbb855cf:39711 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-26T10:28:35,815 INFO [RS:1;94eedbb855cf:39711 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39711 2024-11-26T10:28:35,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-26T10:28:35,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39711-0x10153d14d020002, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/94eedbb855cf,39711,1732616873796 2024-11-26T10:28:35,817 INFO [RS:1;94eedbb855cf:39711 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-26T10:28:35,818 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [94eedbb855cf,39711,1732616873796] 2024-11-26T10:28:35,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:35,820 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/94eedbb855cf,39711,1732616873796 already deleted, retry=false 2024-11-26T10:28:35,820 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 94eedbb855cf,39711,1732616873796 expired; onlineServers=1 2024-11-26T10:28:35,869 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:35,884 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:35,884 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:35,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:35,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:35,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:35,892 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:35,893 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:35,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39711-0x10153d14d020002, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:28:35,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39711-0x10153d14d020002, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:28:35,918 INFO [RS:1;94eedbb855cf:39711 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-26T10:28:35,918 INFO [RS:1;94eedbb855cf:39711 {}] regionserver.HRegionServer(1031): Exiting; stopping=94eedbb855cf,39711,1732616873796; zookeeper connection closed. 2024-11-26T10:28:35,919 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@35f0c739 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@35f0c739 2024-11-26T10:28:36,012 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-26T10:28:36,016 DEBUG [RS:0;94eedbb855cf:35079 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/oldWALs 2024-11-26T10:28:36,016 INFO [RS:0;94eedbb855cf:35079 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 94eedbb855cf%2C35079%2C1732616872640.meta:.meta(num 1732616910812) 2024-11-26T10:28:36,016 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:36,016 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:36,017 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:36,017 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:36,017 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:36,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741894_1078 (size=14682) 2024-11-26T10:28:36,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34659 is added to blk_1073741894_1078 (size=14682) 2024-11-26T10:28:36,021 DEBUG [RS:0;94eedbb855cf:35079 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/oldWALs 2024-11-26T10:28:36,021 INFO [RS:0;94eedbb855cf:35079 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 94eedbb855cf%2C35079%2C1732616872640:(num 1732616910569) 2024-11-26T10:28:36,021 DEBUG [RS:0;94eedbb855cf:35079 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:28:36,021 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.LeaseManager(133): Closed leases 2024-11-26T10:28:36,021 INFO [RS:0;94eedbb855cf:35079 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-26T10:28:36,021 INFO [RS:0;94eedbb855cf:35079 {}] hbase.ChoreService(370): Chore service for: regionserver/94eedbb855cf:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-26T10:28:36,021 INFO [RS:0;94eedbb855cf:35079 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-26T10:28:36,022 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-26T10:28:36,022 INFO [RS:0;94eedbb855cf:35079 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35079 2024-11-26T10:28:36,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/94eedbb855cf,35079,1732616872640 2024-11-26T10:28:36,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-26T10:28:36,024 INFO [RS:0;94eedbb855cf:35079 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-26T10:28:36,025 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [94eedbb855cf,35079,1732616872640] 2024-11-26T10:28:36,027 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/94eedbb855cf,35079,1732616872640 already deleted, retry=false 2024-11-26T10:28:36,027 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 94eedbb855cf,35079,1732616872640 expired; onlineServers=0 2024-11-26T10:28:36,027 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '94eedbb855cf,44935,1732616872581' ***** 2024-11-26T10:28:36,027 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-26T10:28:36,027 INFO [M:0;94eedbb855cf:44935 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-26T10:28:36,027 INFO [M:0;94eedbb855cf:44935 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-26T10:28:36,027 DEBUG [M:0;94eedbb855cf:44935 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-26T10:28:36,027 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-26T10:28:36,027 DEBUG [M:0;94eedbb855cf:44935 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-26T10:28:36,027 DEBUG [master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732616872865 {}] cleaner.HFileCleaner(306): Exit Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732616872865,5,FailOnTimeoutGroup] 2024-11-26T10:28:36,027 DEBUG [master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732616872852 {}] cleaner.HFileCleaner(306): Exit Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732616872852,5,FailOnTimeoutGroup] 2024-11-26T10:28:36,027 INFO [M:0;94eedbb855cf:44935 {}] hbase.ChoreService(370): Chore service for: master/94eedbb855cf:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-26T10:28:36,028 INFO [M:0;94eedbb855cf:44935 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-26T10:28:36,028 DEBUG [M:0;94eedbb855cf:44935 {}] master.HMaster(1795): Stopping service threads 2024-11-26T10:28:36,028 INFO [M:0;94eedbb855cf:44935 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-26T10:28:36,028 INFO [M:0;94eedbb855cf:44935 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-26T10:28:36,028 INFO [M:0;94eedbb855cf:44935 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-26T10:28:36,028 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-26T10:28:36,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-26T10:28:36,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:28:36,029 DEBUG [M:0;94eedbb855cf:44935 {}] zookeeper.ZKUtil(347): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-26T10:28:36,029 WARN [M:0;94eedbb855cf:44935 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-26T10:28:36,030 INFO [M:0;94eedbb855cf:44935 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/.lastflushedseqids 2024-11-26T10:28:36,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34659 is added to blk_1073741913_1099 (size=130) 2024-11-26T10:28:36,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741913_1099 (size=130) 2024-11-26T10:28:36,037 INFO [M:0;94eedbb855cf:44935 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-26T10:28:36,037 INFO [M:0;94eedbb855cf:44935 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-26T10:28:36,037 DEBUG [M:0;94eedbb855cf:44935 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-26T10:28:36,037 INFO [M:0;94eedbb855cf:44935 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:28:36,037 DEBUG [M:0;94eedbb855cf:44935 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:28:36,037 DEBUG [M:0;94eedbb855cf:44935 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-26T10:28:36,037 DEBUG [M:0;94eedbb855cf:44935 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:28:36,037 INFO [M:0;94eedbb855cf:44935 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-26T10:28:36,057 DEBUG [M:0;94eedbb855cf:44935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cd4e4293e6104e3ab0eea7535f8eebbd is 82, key is hbase:meta,,1/info:regioninfo/1732616873656/Put/seqid=0 2024-11-26T10:28:36,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34659 is added to blk_1073741914_1100 (size=5672) 2024-11-26T10:28:36,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741914_1100 (size=5672) 2024-11-26T10:28:36,063 INFO [M:0;94eedbb855cf:44935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cd4e4293e6104e3ab0eea7535f8eebbd 2024-11-26T10:28:36,084 DEBUG [M:0;94eedbb855cf:44935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c543fe5e08924168b200bf3c3ce4a472 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732616874298/Put/seqid=0 2024-11-26T10:28:36,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741915_1101 (size=6255) 2024-11-26T10:28:36,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34659 is added to blk_1073741915_1101 (size=6255) 2024-11-26T10:28:36,090 INFO [M:0;94eedbb855cf:44935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c543fe5e08924168b200bf3c3ce4a472 2024-11-26T10:28:36,095 INFO [M:0;94eedbb855cf:44935 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c543fe5e08924168b200bf3c3ce4a472 2024-11-26T10:28:36,109 DEBUG [M:0;94eedbb855cf:44935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f4516ea48dd941b483b62b202a9ca17e is 69, key is 94eedbb855cf,35079,1732616872640/rs:state/1732616872902/Put/seqid=0 2024-11-26T10:28:36,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741916_1102 (size=5224) 2024-11-26T10:28:36,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34659 is added to blk_1073741916_1102 (size=5224) 2024-11-26T10:28:36,115 INFO [M:0;94eedbb855cf:44935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f4516ea48dd941b483b62b202a9ca17e 2024-11-26T10:28:36,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:28:36,125 INFO [RS:0;94eedbb855cf:35079 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-26T10:28:36,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35079-0x10153d14d020001, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:28:36,125 INFO [RS:0;94eedbb855cf:35079 {}] regionserver.HRegionServer(1031): Exiting; stopping=94eedbb855cf,35079,1732616872640; zookeeper connection closed. 2024-11-26T10:28:36,126 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@591e06ea {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@591e06ea 2024-11-26T10:28:36,126 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-26T10:28:36,135 DEBUG [M:0;94eedbb855cf:44935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7c8c286d16b14916a86f1e787120f465 is 52, key is load_balancer_on/state:d/1732616873780/Put/seqid=0 2024-11-26T10:28:36,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741917_1103 (size=5056) 2024-11-26T10:28:36,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34659 is added to blk_1073741917_1103 (size=5056) 2024-11-26T10:28:36,141 INFO [M:0;94eedbb855cf:44935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7c8c286d16b14916a86f1e787120f465 2024-11-26T10:28:36,147 DEBUG [M:0;94eedbb855cf:44935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cd4e4293e6104e3ab0eea7535f8eebbd as 
hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cd4e4293e6104e3ab0eea7535f8eebbd 2024-11-26T10:28:36,152 INFO [M:0;94eedbb855cf:44935 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cd4e4293e6104e3ab0eea7535f8eebbd, entries=8, sequenceid=60, filesize=5.5 K 2024-11-26T10:28:36,153 DEBUG [M:0;94eedbb855cf:44935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c543fe5e08924168b200bf3c3ce4a472 as hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c543fe5e08924168b200bf3c3ce4a472 2024-11-26T10:28:36,158 INFO [M:0;94eedbb855cf:44935 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c543fe5e08924168b200bf3c3ce4a472 2024-11-26T10:28:36,158 INFO [M:0;94eedbb855cf:44935 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c543fe5e08924168b200bf3c3ce4a472, entries=6, sequenceid=60, filesize=6.1 K 2024-11-26T10:28:36,159 DEBUG [M:0;94eedbb855cf:44935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f4516ea48dd941b483b62b202a9ca17e as hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f4516ea48dd941b483b62b202a9ca17e 2024-11-26T10:28:36,163 INFO [M:0;94eedbb855cf:44935 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f4516ea48dd941b483b62b202a9ca17e, entries=2, sequenceid=60, filesize=5.1 K 2024-11-26T10:28:36,164 DEBUG [M:0;94eedbb855cf:44935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7c8c286d16b14916a86f1e787120f465 as hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7c8c286d16b14916a86f1e787120f465 2024-11-26T10:28:36,169 INFO [M:0;94eedbb855cf:44935 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7c8c286d16b14916a86f1e787120f465, entries=1, sequenceid=60, filesize=4.9 K 2024-11-26T10:28:36,170 INFO [M:0;94eedbb855cf:44935 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=60, compaction requested=false 2024-11-26T10:28:36,172 INFO [M:0;94eedbb855cf:44935 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-26T10:28:36,172 DEBUG [M:0;94eedbb855cf:44935 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732616916037Disabling compacts and flushes for region at 1732616916037Disabling writes for close at 1732616916037Obtaining lock to block concurrent updates at 1732616916037Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732616916037Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1732616916038 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732616916039 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732616916039Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732616916057 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732616916057Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732616916068 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732616916083 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732616916083Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732616916095 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732616916109 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732616916109Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732616916120 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732616916134 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732616916134Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f5e2fa0: reopening flushed file at 1732616916146 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@64184f8d: reopening flushed file at 1732616916152 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d00592a: reopening flushed file at 1732616916158 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3043945f: reopening flushed file at 1732616916164 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=60, compaction requested=false at 1732616916170 (+6 ms)Writing region close event to WAL at 1732616916172 (+2 ms)Closed at 1732616916172 2024-11-26T10:28:36,172 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:36,173 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:36,173 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:36,173 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:36,173 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:36,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34659 is added to blk_1073741890_1073 (size=1045) 2024-11-26T10:28:36,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741890_1073 (size=1045) 2024-11-26T10:28:36,395 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-26T10:28:36,410 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:36,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:36,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:36,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:36,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:36,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:36,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:36,417 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:36,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:36,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:28:37,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741827_1003 (size=196) 2024-11-26T10:28:37,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45009 is added to blk_1073741825_1001 (size=7) 2024-11-26T10:28:37,070 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@35630ada {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-482080985-172.17.0.2-1732616871772:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:44797,null,null]) java.net.ConnectException: Call From 94eedbb855cf/172.17.0.2 to localhost:44319 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-26T10:28:37,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:37,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:37,868 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/WALs/94eedbb855cf,44935,1732616872581/94eedbb855cf%2C44935%2C1732616872581.1732616872741 to hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/oldWALs/94eedbb855cf%2C44935%2C1732616872581.1732616872741 2024-11-26T10:28:37,871 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/MasterData/oldWALs/94eedbb855cf%2C44935%2C1732616872581.1732616872741 to hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/oldWALs/94eedbb855cf%2C44935%2C1732616872581.1732616872741$masterlocalwal$ 2024-11-26T10:28:37,872 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-26T10:28:37,872 INFO [M:0;94eedbb855cf:44935 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-26T10:28:37,872 INFO [M:0;94eedbb855cf:44935 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44935 2024-11-26T10:28:37,872 INFO [M:0;94eedbb855cf:44935 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-26T10:28:37,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:28:37,974 INFO [M:0;94eedbb855cf:44935 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-26T10:28:37,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x10153d14d020000, quorum=127.0.0.1:58673, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:28:37,977 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3163184e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:37,977 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@360084d5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:28:37,977 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:28:37,977 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13c2f5a4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:28:37,977 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c1d8e25{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/hadoop.log.dir/,STOPPED} 2024-11-26T10:28:37,979 WARN [BP-482080985-172.17.0.2-1732616871772 heartbeating to localhost/127.0.0.1:43805 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:28:37,979 WARN [BP-482080985-172.17.0.2-1732616871772 heartbeating to localhost/127.0.0.1:43805 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-482080985-172.17.0.2-1732616871772 (Datanode Uuid 0d9021ae-a61c-4c8b-bb09-5a5ed31a0d11) service to localhost/127.0.0.1:43805 2024-11-26T10:28:37,979 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-26T10:28:37,979 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:28:37,978 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4d5faad4 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-482080985-172.17.0.2-1732616871772:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:44797,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:44319 , LocalHost:localPort 94eedbb855cf/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-26T10:28:37,979 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4d5faad4 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-482080985-172.17.0.2-1732616871772:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:34659,null,null], DatanodeInfoWithStorage[127.0.0.1:44797,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-482080985-172.17.0.2-1732616871772 2024-11-26T10:28:37,979 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data3/current/BP-482080985-172.17.0.2-1732616871772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:37,979 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4d5faad4 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-482080985-172.17.0.2-1732616871772:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:34659,null,null]) java.io.IOException: No block pool offer service for bpid=BP-482080985-172.17.0.2-1732616871772 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:37,980 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4d5faad4 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-482080985-172.17.0.2-1732616871772:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:44797,null,null]) java.io.IOException: No block pool offer service for bpid=BP-482080985-172.17.0.2-1732616871772 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:37,980 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4d5faad4 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-482080985-172.17.0.2-1732616871772:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:34659,null,null], DatanodeInfoWithStorage[127.0.0.1:44797,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-482080985-172.17.0.2-1732616871772:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:34659,null,null], DatanodeInfoWithStorage[127.0.0.1:44797,null,null]] 2024-11-26T10:28:37,980 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data4/current/BP-482080985-172.17.0.2-1732616871772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:37,980 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:28:37,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@233be953{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:37,983 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@71810790{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:28:37,983 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:28:37,983 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4fb99827{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:28:37,983 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c5e4864{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/hadoop.log.dir/,STOPPED} 2024-11-26T10:28:37,984 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-26T10:28:37,984 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:28:37,985 WARN [BP-482080985-172.17.0.2-1732616871772 heartbeating to localhost/127.0.0.1:43805 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:28:37,985 WARN [BP-482080985-172.17.0.2-1732616871772 heartbeating to localhost/127.0.0.1:43805 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-482080985-172.17.0.2-1732616871772 (Datanode Uuid 98e0500c-0229-4fce-9064-0a5963e91032) service to localhost/127.0.0.1:43805 2024-11-26T10:28:37,985 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data7/current/BP-482080985-172.17.0.2-1732616871772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:37,985 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/cluster_030f89ff-21f0-eca3-51b5-cc6610ca1313/data/data8/current/BP-482080985-172.17.0.2-1732616871772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:37,986 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:28:37,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a640c70{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-26T10:28:37,992 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@26313eab{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:28:37,992 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:28:37,992 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17c48ca{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:28:37,992 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e7025d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/hadoop.log.dir/,STOPPED} 2024-11-26T10:28:38,000 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-26T10:28:38,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-26T10:28:38,036 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=156 (was 81) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43805 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43805 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007fec3cbf7948.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:43805 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46211 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43805 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:46211 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43805 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:43805 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007fec3cbf7948.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for 
localhost/127.0.0.1:43805 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43805 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43805 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43805 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43805 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=60 (was 121), ProcessCount=11 (was 11), AvailableMemoryMB=6606 (was 7326) 2024-11-26T10:28:38,043 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=156, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=60, ProcessCount=11, AvailableMemoryMB=6607 2024-11-26T10:28:38,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-26T10:28:38,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/hadoop.log.dir so I do NOT create it in target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20 2024-11-26T10:28:38,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9591e36e-e09e-d990-5b37-3b0e38b8cb2a/hadoop.tmp.dir so I do NOT create it in target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20 2024-11-26T10:28:38,044 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f, deleteOnExit=true 2024-11-26T10:28:38,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-26T10:28:38,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/test.cache.data in system properties and HBase conf 2024-11-26T10:28:38,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/hadoop.tmp.dir in system properties and HBase conf 2024-11-26T10:28:38,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/hadoop.log.dir in system properties and HBase conf 2024-11-26T10:28:38,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/mapreduce.cluster.local.dir in system properties and HBase conf 
2024-11-26T10:28:38,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-26T10:28:38,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-26T10:28:38,045 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-26T10:28:38,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-26T10:28:38,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-26T10:28:38,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-26T10:28:38,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-26T10:28:38,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-26T10:28:38,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-26T10:28:38,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-26T10:28:38,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-26T10:28:38,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-26T10:28:38,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/nfs.dump.dir in system properties and HBase conf 2024-11-26T10:28:38,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/java.io.tmpdir in system properties and HBase conf 2024-11-26T10:28:38,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-26T10:28:38,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-26T10:28:38,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-26T10:28:38,059 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-26T10:28:38,130 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:28:38,135 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:28:38,136 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:28:38,136 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:28:38,136 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-26T10:28:38,137 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:28:38,137 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a0844a7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:28:38,138 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a8be7bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:28:38,251 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5566be26{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/java.io.tmpdir/jetty-localhost-41411-hadoop-hdfs-3_4_1-tests_jar-_-any-9244930187903595582/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-26T10:28:38,251 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6d60493b{HTTP/1.1, (http/1.1)}{localhost:41411} 2024-11-26T10:28:38,251 INFO [Time-limited test {}] server.Server(415): Started @150399ms 2024-11-26T10:28:38,264 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-26T10:28:38,339 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:28:38,342 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:28:38,345 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:28:38,345 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:28:38,345 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-26T10:28:38,345 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@109832d2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:28:38,346 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bd771df{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:28:38,459 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@119a3311{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/java.io.tmpdir/jetty-localhost-38267-hadoop-hdfs-3_4_1-tests_jar-_-any-11191007313072333948/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:38,459 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@168478e8{HTTP/1.1, (http/1.1)}{localhost:38267} 2024-11-26T10:28:38,459 INFO [Time-limited test {}] server.Server(415): Started @150607ms 2024-11-26T10:28:38,461 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-26T10:28:38,494 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:28:38,497 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:28:38,498 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:28:38,498 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:28:38,498 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-26T10:28:38,499 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@650740c5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:28:38,499 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@482b646b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:28:38,561 WARN [Thread-1193 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/data/data2/current/BP-1039670754-172.17.0.2-1732616918076/current, will proceed with Du for space computation calculation, 2024-11-26T10:28:38,561 WARN [Thread-1192 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/data/data1/current/BP-1039670754-172.17.0.2-1732616918076/current, will proceed with Du for space computation calculation, 2024-11-26T10:28:38,578 WARN [Thread-1171 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-26T10:28:38,581 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa90f12aecc50c213 with lease ID 0x5e40b9f250ab4b43: Processing first storage report for DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea from datanode DatanodeRegistration(127.0.0.1:33257, datanodeUuid=3d0812ea-202c-4415-8158-862e43e0ced4, infoPort=33733, infoSecurePort=0, ipcPort=39829, storageInfo=lv=-57;cid=testClusterID;nsid=34050987;c=1732616918076) 2024-11-26T10:28:38,581 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa90f12aecc50c213 with lease ID 0x5e40b9f250ab4b43: from storage DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea node DatanodeRegistration(127.0.0.1:33257, datanodeUuid=3d0812ea-202c-4415-8158-862e43e0ced4, infoPort=33733, infoSecurePort=0, ipcPort=39829, storageInfo=lv=-57;cid=testClusterID;nsid=34050987;c=1732616918076), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:28:38,581 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa90f12aecc50c213 with lease ID 0x5e40b9f250ab4b43: Processing first storage report for DS-e518e725-eeb2-4087-9263-624bbafa80fb from datanode DatanodeRegistration(127.0.0.1:33257, datanodeUuid=3d0812ea-202c-4415-8158-862e43e0ced4, infoPort=33733, infoSecurePort=0, ipcPort=39829, storageInfo=lv=-57;cid=testClusterID;nsid=34050987;c=1732616918076) 2024-11-26T10:28:38,581 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa90f12aecc50c213 with lease ID 0x5e40b9f250ab4b43: from storage DS-e518e725-eeb2-4087-9263-624bbafa80fb node DatanodeRegistration(127.0.0.1:33257, datanodeUuid=3d0812ea-202c-4415-8158-862e43e0ced4, infoPort=33733, infoSecurePort=0, ipcPort=39829, storageInfo=lv=-57;cid=testClusterID;nsid=34050987;c=1732616918076), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:28:38,614 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@144c75a2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/java.io.tmpdir/jetty-localhost-34355-hadoop-hdfs-3_4_1-tests_jar-_-any-11326239177630577059/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:38,615 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@37b7928{HTTP/1.1, (http/1.1)}{localhost:34355} 2024-11-26T10:28:38,615 INFO [Time-limited test {}] server.Server(415): Started @150762ms 2024-11-26T10:28:38,616 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-26T10:28:38,705 WARN [Thread-1218 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/data/data3/current/BP-1039670754-172.17.0.2-1732616918076/current, will proceed with Du for space computation calculation, 2024-11-26T10:28:38,706 WARN [Thread-1219 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/data/data4/current/BP-1039670754-172.17.0.2-1732616918076/current, will proceed with Du for space computation calculation, 2024-11-26T10:28:38,723 WARN [Thread-1207 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-26T10:28:38,725 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41a7e6c002b52eff with lease ID 0x5e40b9f250ab4b44: Processing first storage report for DS-03e277ae-f504-4f2c-b4f1-b1273c42ff3b from datanode DatanodeRegistration(127.0.0.1:37821, datanodeUuid=7550d28f-0df4-40a2-bbdb-7d274374c671, infoPort=36829, infoSecurePort=0, ipcPort=41911, storageInfo=lv=-57;cid=testClusterID;nsid=34050987;c=1732616918076) 2024-11-26T10:28:38,725 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41a7e6c002b52eff with lease ID 0x5e40b9f250ab4b44: from storage DS-03e277ae-f504-4f2c-b4f1-b1273c42ff3b node DatanodeRegistration(127.0.0.1:37821, datanodeUuid=7550d28f-0df4-40a2-bbdb-7d274374c671, infoPort=36829, infoSecurePort=0, ipcPort=41911, storageInfo=lv=-57;cid=testClusterID;nsid=34050987;c=1732616918076), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:28:38,725 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41a7e6c002b52eff with lease ID 0x5e40b9f250ab4b44: Processing first storage report for DS-569afac8-182c-4c39-bab7-784b9acd8ebd from datanode DatanodeRegistration(127.0.0.1:37821, datanodeUuid=7550d28f-0df4-40a2-bbdb-7d274374c671, infoPort=36829, infoSecurePort=0, ipcPort=41911, storageInfo=lv=-57;cid=testClusterID;nsid=34050987;c=1732616918076) 2024-11-26T10:28:38,725 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41a7e6c002b52eff with lease ID 0x5e40b9f250ab4b44: from storage DS-569afac8-182c-4c39-bab7-784b9acd8ebd node DatanodeRegistration(127.0.0.1:37821, datanodeUuid=7550d28f-0df4-40a2-bbdb-7d274374c671, infoPort=36829, infoSecurePort=0, ipcPort=41911, storageInfo=lv=-57;cid=testClusterID;nsid=34050987;c=1732616918076), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:28:38,740 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20 2024-11-26T10:28:38,743 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/zookeeper_0, clientPort=51786, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-26T10:28:38,744 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51786 2024-11-26T10:28:38,744 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:28:38,746 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:28:38,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741825_1001 (size=7) 2024-11-26T10:28:38,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37821 is added to blk_1073741825_1001 (size=7) 2024-11-26T10:28:38,755 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7 with version=8 2024-11-26T10:28:38,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/hbase-staging 2024-11-26T10:28:38,758 INFO [Time-limited test {}] client.ConnectionUtils(128): master/94eedbb855cf:0 server-side Connection retries=45 2024-11-26T10:28:38,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:28:38,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-26T10:28:38,758 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-26T10:28:38,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:28:38,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-26T10:28:38,758 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-26T10:28:38,758 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-26T10:28:38,759 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37987 2024-11-26T10:28:38,760 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37987 connecting to ZooKeeper ensemble=127.0.0.1:51786 2024-11-26T10:28:38,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:379870x0, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-26T10:28:38,765 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37987-0x10153d201660000 connected 2024-11-26T10:28:38,780 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:28:38,781 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:28:38,783 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:28:38,783 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7, hbase.cluster.distributed=false 2024-11-26T10:28:38,785 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-26T10:28:38,785 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37987 2024-11-26T10:28:38,785 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37987 2024-11-26T10:28:38,786 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37987 2024-11-26T10:28:38,786 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37987 2024-11-26T10:28:38,786 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37987 2024-11-26T10:28:38,802 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/94eedbb855cf:0 server-side Connection retries=45 2024-11-26T10:28:38,802 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:28:38,802 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-26T10:28:38,802 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-26T10:28:38,802 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:28:38,802 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-26T10:28:38,802 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-26T10:28:38,802 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-26T10:28:38,803 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39993 2024-11-26T10:28:38,804 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39993 connecting to ZooKeeper ensemble=127.0.0.1:51786 2024-11-26T10:28:38,805 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:28:38,806 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:28:38,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:399930x0, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-26T10:28:38,810 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:28:38,810 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39993-0x10153d201660001 connected 2024-11-26T10:28:38,810 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-26T10:28:38,812 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-26T10:28:38,813 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-26T10:28:38,814 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-26T10:28:38,814 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39993 2024-11-26T10:28:38,815 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39993 2024-11-26T10:28:38,815 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39993 2024-11-26T10:28:38,815 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39993 2024-11-26T10:28:38,815 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39993 
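The endpoints logged above (master RPC on 172.17.0.2:37987, region server on 39993, ZooKeeper ensemble 127.0.0.1:51786) are reached indirectly by clients: in the classic setup a client is configured only with the ZooKeeper quorum and discovers the master and region servers from there (newer connection registries can also bootstrap from the masters directly). A hedged sketch of such a client follows; the table name 'some_table' is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class ClientConnectSketch {
      public static void main(String[] args) throws Exception {
        // Point the client at the mini ZooKeeper started above
        // (ensemble 127.0.0.1, clientPort 51786 in this run).
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 51786);

        // Master (37987) and region server (39993) locations are looked up
        // through the registry; they are not configured directly.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("some_table"))) {
          // ... issue Gets / Puts against the table ...
        }
      }
    }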
2024-11-26T10:28:38,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:38,827 DEBUG [M:0;94eedbb855cf:37987 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;94eedbb855cf:37987 2024-11-26T10:28:38,828 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/94eedbb855cf,37987,1732616918757 2024-11-26T10:28:38,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:38,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:28:38,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:28:38,837 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/94eedbb855cf,37987,1732616918757 2024-11-26T10:28:38,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-26T10:28:38,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:28:38,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:28:38,839 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-26T10:28:38,839 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] 
master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/94eedbb855cf,37987,1732616918757 from backup master directory 2024-11-26T10:28:38,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/94eedbb855cf,37987,1732616918757 2024-11-26T10:28:38,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:28:38,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:28:38,840 WARN [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-26T10:28:38,840 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=94eedbb855cf,37987,1732616918757 2024-11-26T10:28:38,845 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/hbase.id] with ID: cb3aeda6-f116-45f5-a906-29a97b358965 2024-11-26T10:28:38,845 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/.tmp/hbase.id 2024-11-26T10:28:38,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741826_1002 (size=42) 2024-11-26T10:28:38,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37821 is added to blk_1073741826_1002 (size=42) 2024-11-26T10:28:38,851 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/.tmp/hbase.id]:[hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/hbase.id] 2024-11-26T10:28:38,862 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:28:38,863 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-26T10:28:38,864 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
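The cluster ID handling above follows a write-then-rename pattern: the ID is written to .tmp/hbase.id first and then moved to hbase.id, so a reader never observes a partially written file. Below is a minimal sketch of that pattern with the plain Hadoop FileSystem API; the real FSUtils helper serializes a ClusterId object rather than a raw string, and the root-dir path here is a placeholder.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder root dir; the log uses hdfs://localhost:46631/user/jenkins/test-data/...
        Path rootDir = new Path("hdfs://localhost:46631/user/jenkins/test-data/example-root");
        FileSystem fs = rootDir.getFileSystem(conf);

        Path tmpId = new Path(rootDir, ".tmp/hbase.id");
        Path finalId = new Path(rootDir, "hbase.id");

        // 1. Write the ID to a temporary file first ...
        try (FSDataOutputStream out = fs.create(tmpId, true)) {
          out.write("cb3aeda6-f116-45f5-a906-29a97b358965".getBytes(StandardCharsets.UTF_8));
        }
        // 2. ... then move it into place, so hbase.id is either absent or complete.
        if (!fs.rename(tmpId, finalId)) {
          throw new IOException("rename failed: " + tmpId + " -> " + finalId);
        }
      }
    }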
2024-11-26T10:28:38,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:28:38,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:28:38,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37821 is added to blk_1073741827_1003 (size=196) 2024-11-26T10:28:38,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741827_1003 (size=196) 2024-11-26T10:28:38,875 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:28:38,876 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-26T10:28:38,876 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:28:38,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37821 is added to blk_1073741828_1004 (size=1189) 2024-11-26T10:28:38,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741828_1004 (size=1189) 2024-11-26T10:28:38,884 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store 2024-11-26T10:28:38,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37821 is added to blk_1073741829_1005 (size=34) 2024-11-26T10:28:38,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741829_1005 (size=34) 2024-11-26T10:28:38,890 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:28:38,890 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-26T10:28:38,890 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:28:38,891 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:28:38,891 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-26T10:28:38,891 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:28:38,891 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
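The master:store descriptor dumped above (families info, proc, rs and state, with the info family kept in memory at 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom filter and 8 KB blocks) maps onto the public descriptor builders roughly as in the sketch below. This is illustrative only: master:store is an internal region created by MasterRegion itself, not a table a test would build this way.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor descriptor = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            // 'info' family as printed in the log: 3 versions, ROW_INDEX_V1 encoding,
            // ROWCOL bloom filter, in-memory, 8 KB blocks.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build())
            // 'proc', 'rs' and 'state' use the defaults shown in the log:
            // 1 version, ROW bloom filter, 64 KB blocks, no encoding.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();

        System.out.println(descriptor);
      }
    }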
2024-11-26T10:28:38,891 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732616918890Disabling compacts and flushes for region at 1732616918890Disabling writes for close at 1732616918891 (+1 ms)Writing region close event to WAL at 1732616918891Closed at 1732616918891 2024-11-26T10:28:38,891 WARN [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/.initializing 2024-11-26T10:28:38,892 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/WALs/94eedbb855cf,37987,1732616918757 2024-11-26T10:28:38,894 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C37987%2C1732616918757, suffix=, logDir=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/WALs/94eedbb855cf,37987,1732616918757, archiveDir=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/oldWALs, maxLogs=10 2024-11-26T10:28:38,894 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C37987%2C1732616918757.1732616918894 2024-11-26T10:28:38,899 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/WALs/94eedbb855cf,37987,1732616918757/94eedbb855cf%2C37987%2C1732616918757.1732616918894 2024-11-26T10:28:38,899 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36829:36829),(127.0.0.1/127.0.0.1:33733:33733)] 2024-11-26T10:28:38,900 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:28:38,900 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:28:38,900 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:28:38,900 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:28:38,902 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:28:38,903 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-26T10:28:38,903 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:28:38,903 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:28:38,904 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:28:38,905 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-26T10:28:38,905 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:28:38,905 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:28:38,905 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:28:38,906 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-26T10:28:38,906 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:28:38,907 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:28:38,907 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:28:38,908 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-26T10:28:38,908 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:28:38,908 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:28:38,909 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:28:38,909 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:28:38,910 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:28:38,911 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:28:38,911 DEBUG [master/94eedbb855cf:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:28:38,912 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-26T10:28:38,913 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:28:38,917 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:28:38,917 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=830677, jitterRate=0.05626128613948822}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-26T10:28:38,918 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732616918901Initializing all the Stores at 1732616918901Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616918901Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616918902 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616918902Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616918902Cleaning up temporary data from old regions at 1732616918911 (+9 ms)Region opened successfully at 1732616918918 (+7 ms) 2024-11-26T10:28:38,918 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-26T10:28:38,921 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31067cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=94eedbb855cf/172.17.0.2:0 2024-11-26T10:28:38,922 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-26T10:28:38,923 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-26T10:28:38,923 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-26T10:28:38,923 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-26T10:28:38,923 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-26T10:28:38,924 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-26T10:28:38,924 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-26T10:28:38,926 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-26T10:28:38,926 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-26T10:28:38,930 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-26T10:28:38,930 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-26T10:28:38,931 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-26T10:28:38,932 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-26T10:28:38,932 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-26T10:28:38,933 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-26T10:28:38,934 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-26T10:28:38,935 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-26T10:28:38,936 DEBUG 
[master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-26T10:28:38,938 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-26T10:28:38,940 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-26T10:28:38,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-26T10:28:38,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-26T10:28:38,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:28:38,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:28:38,942 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=94eedbb855cf,37987,1732616918757, sessionid=0x10153d201660000, setting cluster-up flag (Was=false) 2024-11-26T10:28:38,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:28:38,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:28:38,950 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-26T10:28:38,951 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=94eedbb855cf,37987,1732616918757 2024-11-26T10:28:38,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:28:38,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:28:38,959 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-26T10:28:38,960 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=94eedbb855cf,37987,1732616918757 2024-11-26T10:28:38,962 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-26T10:28:38,963 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-26T10:28:38,964 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-26T10:28:38,964 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-26T10:28:38,964 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 94eedbb855cf,37987,1732616918757 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-26T10:28:38,965 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:28:38,965 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:28:38,965 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:28:38,966 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:28:38,966 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/94eedbb855cf:0, corePoolSize=10, maxPoolSize=10 2024-11-26T10:28:38,966 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:28:38,966 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/94eedbb855cf:0, corePoolSize=2, maxPoolSize=2 2024-11-26T10:28:38,966 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/94eedbb855cf:0, corePoolSize=1, 
maxPoolSize=1 2024-11-26T10:28:38,968 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:28:38,968 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-26T10:28:38,969 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732616948969 2024-11-26T10:28:38,969 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-26T10:28:38,969 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:28:38,969 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-26T10:28:38,969 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-26T10:28:38,969 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-26T10:28:38,969 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-26T10:28:38,969 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-26T10:28:38,969 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-26T10:28:38,969 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-26T10:28:38,970 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-26T10:28:38,970 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-26T10:28:38,970 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-26T10:28:38,970 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-26T10:28:38,970 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-26T10:28:38,971 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732616918970,5,FailOnTimeoutGroup] 2024-11-26T10:28:38,971 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732616918971,5,FailOnTimeoutGroup] 2024-11-26T10:28:38,971 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-26T10:28:38,971 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-26T10:28:38,971 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-26T10:28:38,971 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
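The entries above show the master enabling a series of ScheduledChore instances (LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms). As a rough illustration of what "period=..., unit=MILLISECONDS is enabled" amounts to, here is a minimal sketch using plain java.util.concurrent rather than HBase's ChoreService; the class and task bodies are invented for illustration only.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Hypothetical stand-in for a chore service: runs a named task at a fixed period.
public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);

        // Mirrors "Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS"
        schedule(pool, "LogsCleaner", 600_000);
        // Mirrors "Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS"
        schedule(pool, "SnapshotCleaner", 1_800_000);

        Thread.sleep(1000);   // let the demo run briefly
        pool.shutdownNow();
    }

    static void schedule(ScheduledExecutorService pool, String name, long periodMillis) {
        // Initial delay 0 so the first run fires immediately, then every periodMillis.
        pool.scheduleAtFixedRate(
            () -> System.out.println("chore " + name + " fired"),
            0, periodMillis, TimeUnit.MILLISECONDS);
    }
}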
2024-11-26T10:28:38,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741831_1007 (size=1321) 2024-11-26T10:28:38,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37821 is added to blk_1073741831_1007 (size=1321) 2024-11-26T10:28:38,979 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-26T10:28:38,979 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7 2024-11-26T10:28:38,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741832_1008 (size=32) 2024-11-26T10:28:38,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37821 is added to blk_1073741832_1008 (size=32) 2024-11-26T10:28:38,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:28:38,989 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-26T10:28:38,991 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-26T10:28:38,991 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:28:38,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:28:38,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-26T10:28:38,993 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-26T10:28:38,993 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:28:38,993 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:28:38,993 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-26T10:28:38,994 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-26T10:28:38,995 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:28:38,995 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:28:38,995 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-26T10:28:38,996 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-26T10:28:38,996 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:28:38,997 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:28:38,997 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-26T10:28:38,998 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740 2024-11-26T10:28:38,998 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740 2024-11-26T10:28:38,999 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-26T10:28:38,999 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-26T10:28:39,000 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
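The FlushLargeStoresPolicy line just above, like the earlier one for master:store, reports that hbase.hregion.percolumnfamilyflush.size.lower.bound is unset, so the lower bound falls back to the region's memstore flush size divided by the number of column families: the logged "(32.0 M)" and flushSizeLowerBound=33554432 fit 128 MB across the four master:store families (flushSize=134217728 appears earlier in the log), and the "(16.0 M)" and flushSizeLowerBound=16777216 here fit a 64 MB flush size across the four hbase:meta families (the 64 MB figure is an inference from the numbers, not a config visible in the log). A minimal sketch of that fallback arithmetic, with names invented here rather than taken from the HBase source:

// Sketch of the fallback in the FlushLargeStoresPolicy lines: when the per-column-family
// lower bound is not configured, use memstoreFlushSize / numberOfFamilies.
public class FlushLowerBoundSketch {

    // Returns the explicit bound if configured (> 0), otherwise the fallback.
    static long flushSizeLowerBound(long configuredLowerBound,
                                    long memstoreFlushSize,
                                    int numFamilies) {
        if (configuredLowerBound > 0) {
            return configuredLowerBound;
        }
        return memstoreFlushSize / numFamilies;
    }

    public static void main(String[] args) {
        // master:store: flushSize=134217728 (128 MB), 4 families (info, proc, rs, state)
        System.out.println(flushSizeLowerBound(0, 134_217_728L, 4)); // 33554432 (32 MB)
        // hbase:meta: 4 families (info, ns, rep_barrier, table); inferred 64 MB flush size
        System.out.println(flushSizeLowerBound(0, 67_108_864L, 4));  // 16777216 (16 MB)
    }
}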
2024-11-26T10:28:39,001 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-26T10:28:39,003 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:28:39,003 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=763308, jitterRate=-0.0294039249420166}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:28:39,004 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732616918988Initializing all the Stores at 1732616918989 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616918989Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616918989Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616918989Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616918989Cleaning up temporary data from old regions at 1732616918999 (+10 ms)Region opened successfully at 1732616919004 (+5 ms) 2024-11-26T10:28:39,004 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-26T10:28:39,004 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-26T10:28:39,004 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-26T10:28:39,004 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-26T10:28:39,004 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-26T10:28:39,005 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-26T10:28:39,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732616919004Disabling compacts and flushes for region at 1732616919004Disabling writes for close at 1732616919004Writing region close 
event to WAL at 1732616919004Closed at 1732616919005 (+1 ms) 2024-11-26T10:28:39,006 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:28:39,006 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-26T10:28:39,006 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-26T10:28:39,007 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-26T10:28:39,008 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-26T10:28:39,017 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.HRegionServer(746): ClusterId : cb3aeda6-f116-45f5-a906-29a97b358965 2024-11-26T10:28:39,017 DEBUG [RS:0;94eedbb855cf:39993 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-26T10:28:39,021 DEBUG [RS:0;94eedbb855cf:39993 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-26T10:28:39,021 DEBUG [RS:0;94eedbb855cf:39993 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-26T10:28:39,023 DEBUG [RS:0;94eedbb855cf:39993 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-26T10:28:39,023 DEBUG [RS:0;94eedbb855cf:39993 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4040ae33, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=94eedbb855cf/172.17.0.2:0 2024-11-26T10:28:39,041 DEBUG [RS:0;94eedbb855cf:39993 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;94eedbb855cf:39993 2024-11-26T10:28:39,041 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-26T10:28:39,041 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-26T10:28:39,041 DEBUG [RS:0;94eedbb855cf:39993 {}] regionserver.HRegionServer(832): About to register with Master. 
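The two "Opened ... ConstantSizeRegionSplitPolicy{desiredMaxFileSize=..., jitterRate=...}" entries above are consistent with a jittered split threshold of the form base * (1 + jitterRate): 763308 with jitterRate=-0.0294 here, and 830677 with jitterRate=0.0563 for the master store region earlier, both point to a base of 786432 bytes in this test run (that base is inferred from the two logged values, not stated anywhere in the log). A small sketch of that arithmetic, with invented names:

// Sketch of the jittered split threshold suggested by the logged
// "desiredMaxFileSize=..., jitterRate=..." pairs:
// desiredMaxFileSize ~= baseMaxFileSize + baseMaxFileSize * jitterRate.
public class SplitJitterSketch {

    static long desiredMaxFileSize(long baseMaxFileSize, double jitterRate) {
        return baseMaxFileSize + (long) (baseMaxFileSize * jitterRate);
    }

    public static void main(String[] args) {
        long base = 786_432L; // inferred from the two logged values above
        System.out.println(desiredMaxFileSize(base, 0.05626128613948822));  // ~830677
        System.out.println(desiredMaxFileSize(base, -0.0294039249420166));  // ~763308
    }
}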
2024-11-26T10:28:39,042 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.HRegionServer(2659): reportForDuty to master=94eedbb855cf,37987,1732616918757 with port=39993, startcode=1732616918801 2024-11-26T10:28:39,042 DEBUG [RS:0;94eedbb855cf:39993 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-26T10:28:39,044 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55455, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-26T10:28:39,045 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37987 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 94eedbb855cf,39993,1732616918801 2024-11-26T10:28:39,045 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37987 {}] master.ServerManager(517): Registering regionserver=94eedbb855cf,39993,1732616918801 2024-11-26T10:28:39,047 DEBUG [RS:0;94eedbb855cf:39993 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7 2024-11-26T10:28:39,047 DEBUG [RS:0;94eedbb855cf:39993 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46631 2024-11-26T10:28:39,047 DEBUG [RS:0;94eedbb855cf:39993 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-26T10:28:39,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-26T10:28:39,049 DEBUG [RS:0;94eedbb855cf:39993 {}] zookeeper.ZKUtil(111): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/94eedbb855cf,39993,1732616918801 2024-11-26T10:28:39,049 WARN [RS:0;94eedbb855cf:39993 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-26T10:28:39,049 INFO [RS:0;94eedbb855cf:39993 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:28:39,049 DEBUG [RS:0;94eedbb855cf:39993 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801 2024-11-26T10:28:39,049 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [94eedbb855cf,39993,1732616918801] 2024-11-26T10:28:39,052 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-26T10:28:39,054 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-26T10:28:39,054 INFO [RS:0;94eedbb855cf:39993 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-26T10:28:39,054 INFO [RS:0;94eedbb855cf:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
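The MemStoreFlusher line above reports globalMemStoreLimit=880 M with globalMemStoreLimitLowMark=836 M, which is consistent with a low mark at 0.95 of the limit (880 M x 0.95 = 836 M). A one-liner sketch of that relationship; the 0.95 fraction is an assumption inferred from the two logged numbers:

// Sketch of the relationship visible in the MemStoreFlusher line:
// lowMark = globalMemStoreLimit * lowerLimitFraction (0.95 fits 880 M -> 836 M).
public class MemStoreLimitSketch {

    static long lowMark(long globalLimitBytes, double lowerLimitFraction) {
        return (long) (globalLimitBytes * lowerLimitFraction);
    }

    public static void main(String[] args) {
        long limit = 880L * 1024 * 1024;           // "globalMemStoreLimit=880 M"
        long mark = lowMark(limit, 0.95);          // fraction assumed; only 880/836 is in the log
        System.out.println(mark / (1024 * 1024));  // 836
    }
}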
2024-11-26T10:28:39,054 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-26T10:28:39,055 INFO [RS:0;94eedbb855cf:39993 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-26T10:28:39,055 INFO [RS:0;94eedbb855cf:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-26T10:28:39,055 DEBUG [RS:0;94eedbb855cf:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:28:39,055 DEBUG [RS:0;94eedbb855cf:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:28:39,056 DEBUG [RS:0;94eedbb855cf:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:28:39,056 DEBUG [RS:0;94eedbb855cf:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:28:39,056 DEBUG [RS:0;94eedbb855cf:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:28:39,056 DEBUG [RS:0;94eedbb855cf:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/94eedbb855cf:0, corePoolSize=2, maxPoolSize=2 2024-11-26T10:28:39,056 DEBUG [RS:0;94eedbb855cf:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:28:39,056 DEBUG [RS:0;94eedbb855cf:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:28:39,056 DEBUG [RS:0;94eedbb855cf:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:28:39,056 DEBUG [RS:0;94eedbb855cf:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:28:39,056 DEBUG [RS:0;94eedbb855cf:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:28:39,056 DEBUG [RS:0;94eedbb855cf:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:28:39,056 DEBUG [RS:0;94eedbb855cf:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/94eedbb855cf:0, corePoolSize=3, maxPoolSize=3 2024-11-26T10:28:39,056 DEBUG [RS:0;94eedbb855cf:39993 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0, corePoolSize=3, maxPoolSize=3 2024-11-26T10:28:39,058 INFO [RS:0;94eedbb855cf:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
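Each "Starting executor service name=..., corePoolSize=N, maxPoolSize=N" line above corresponds to a named, fixed-size worker pool on the region server. A rough stand-in using java.util.concurrent (not HBase's own ExecutorService wrapper) that names its threads and pins core and max pool size the way the log reports:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative only: a named fixed-size pool, matching the corePoolSize/maxPoolSize
// pairs reported in the "Starting executor service" lines.
public class NamedPoolSketch {

    static ThreadPoolExecutor namedPool(String name, int poolSize) {
        AtomicInteger counter = new AtomicInteger();
        return new ThreadPoolExecutor(
            poolSize, poolSize,                       // corePoolSize == maxPoolSize
            60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>(),              // unbounded work queue
            r -> new Thread(r, name + "-" + counter.incrementAndGet()));
    }

    public static void main(String[] args) {
        ThreadPoolExecutor openRegion = namedPool("RS_OPEN_REGION", 1);   // core=1, max=1 in the log
        ThreadPoolExecutor logReplay = namedPool("RS_LOG_REPLAY_OPS", 2); // core=2, max=2 in the log
        openRegion.execute(() -> System.out.println(Thread.currentThread().getName()));
        openRegion.shutdown();
        logReplay.shutdown();
    }
}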
2024-11-26T10:28:39,058 INFO [RS:0;94eedbb855cf:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-26T10:28:39,058 INFO [RS:0;94eedbb855cf:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:28:39,058 INFO [RS:0;94eedbb855cf:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-26T10:28:39,058 INFO [RS:0;94eedbb855cf:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-26T10:28:39,058 INFO [RS:0;94eedbb855cf:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,39993,1732616918801-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-26T10:28:39,074 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-26T10:28:39,074 INFO [RS:0;94eedbb855cf:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,39993,1732616918801-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:28:39,074 INFO [RS:0;94eedbb855cf:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:28:39,074 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.Replication(171): 94eedbb855cf,39993,1732616918801 started 2024-11-26T10:28:39,088 INFO [RS:0;94eedbb855cf:39993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:28:39,088 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.HRegionServer(1482): Serving as 94eedbb855cf,39993,1732616918801, RpcServer on 94eedbb855cf/172.17.0.2:39993, sessionid=0x10153d201660001 2024-11-26T10:28:39,088 DEBUG [RS:0;94eedbb855cf:39993 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-26T10:28:39,088 DEBUG [RS:0;94eedbb855cf:39993 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 94eedbb855cf,39993,1732616918801 2024-11-26T10:28:39,088 DEBUG [RS:0;94eedbb855cf:39993 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '94eedbb855cf,39993,1732616918801' 2024-11-26T10:28:39,088 DEBUG [RS:0;94eedbb855cf:39993 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-26T10:28:39,089 DEBUG [RS:0;94eedbb855cf:39993 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-26T10:28:39,089 DEBUG [RS:0;94eedbb855cf:39993 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-26T10:28:39,089 DEBUG [RS:0;94eedbb855cf:39993 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-26T10:28:39,089 DEBUG [RS:0;94eedbb855cf:39993 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 94eedbb855cf,39993,1732616918801 2024-11-26T10:28:39,089 DEBUG [RS:0;94eedbb855cf:39993 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '94eedbb855cf,39993,1732616918801' 2024-11-26T10:28:39,089 DEBUG [RS:0;94eedbb855cf:39993 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-26T10:28:39,089 DEBUG 
[RS:0;94eedbb855cf:39993 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-26T10:28:39,090 DEBUG [RS:0;94eedbb855cf:39993 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-26T10:28:39,090 INFO [RS:0;94eedbb855cf:39993 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-26T10:28:39,090 INFO [RS:0;94eedbb855cf:39993 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-26T10:28:39,159 WARN [94eedbb855cf:37987 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-26T10:28:39,192 INFO [RS:0;94eedbb855cf:39993 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C39993%2C1732616918801, suffix=, logDir=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801, archiveDir=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/oldWALs, maxLogs=32 2024-11-26T10:28:39,193 INFO [RS:0;94eedbb855cf:39993 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C39993%2C1732616918801.1732616919193 2024-11-26T10:28:39,198 INFO [RS:0;94eedbb855cf:39993 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616919193 2024-11-26T10:28:39,199 DEBUG [RS:0;94eedbb855cf:39993 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33733:33733),(127.0.0.1/127.0.0.1:36829:36829)] 2024-11-26T10:28:39,409 DEBUG [94eedbb855cf:37987 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-26T10:28:39,410 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=94eedbb855cf,39993,1732616918801 2024-11-26T10:28:39,411 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 94eedbb855cf,39993,1732616918801, state=OPENING 2024-11-26T10:28:39,413 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-26T10:28:39,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:28:39,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:28:39,415 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-26T10:28:39,415 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:28:39,415 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:28:39,415 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=94eedbb855cf,39993,1732616918801}] 2024-11-26T10:28:39,569 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-26T10:28:39,571 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32939, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-26T10:28:39,575 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-26T10:28:39,575 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:28:39,576 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C39993%2C1732616918801.meta, suffix=.meta, logDir=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801, archiveDir=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/oldWALs, maxLogs=32 2024-11-26T10:28:39,577 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C39993%2C1732616918801.meta.1732616919577.meta 2024-11-26T10:28:39,582 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.meta.1732616919577.meta 2024-11-26T10:28:39,586 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36829:36829),(127.0.0.1/127.0.0.1:33733:33733)] 2024-11-26T10:28:39,590 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:28:39,590 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-26T10:28:39,590 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-26T10:28:39,591 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
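The WAL file names above combine the server name with its commas URL-encoded (94eedbb855cf,39993,1732616918801 becomes 94eedbb855cf%2C39993%2C1732616918801), a creation timestamp, and, for the meta WAL, the ".meta" prefix/suffix reported in the AbstractFSWAL configuration line. A sketch that reproduces those names; the exact composition rule is an inference from the logged prefix/suffix values, not taken from the HBase source:

import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

// Sketch of the WAL file naming visible in the log: URL-encoded server name as the
// prefix (commas become %2C), a creation timestamp, and an optional ".meta" suffix.
public class WalNameSketch {

    static String walFileName(String serverName, String prefixSuffix, long ts, String suffix) {
        String encoded = URLEncoder.encode(serverName, StandardCharsets.UTF_8);
        return encoded + prefixSuffix + "." + ts + suffix;
    }

    public static void main(String[] args) {
        String server = "94eedbb855cf,39993,1732616918801";
        // 94eedbb855cf%2C39993%2C1732616918801.1732616919193
        System.out.println(walFileName(server, "", 1732616919193L, ""));
        // 94eedbb855cf%2C39993%2C1732616918801.meta.1732616919577.meta
        System.out.println(walFileName(server, ".meta", 1732616919577L, ".meta"));
    }
}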
2024-11-26T10:28:39,591 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-26T10:28:39,591 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:28:39,591 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-26T10:28:39,591 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-26T10:28:39,592 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-26T10:28:39,593 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-26T10:28:39,593 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:28:39,593 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:28:39,593 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-26T10:28:39,594 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-26T10:28:39,594 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:28:39,595 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:28:39,595 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-26T10:28:39,595 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-26T10:28:39,595 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:28:39,596 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:28:39,596 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-26T10:28:39,596 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-26T10:28:39,596 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:28:39,597 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-26T10:28:39,597 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-26T10:28:39,597 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740 2024-11-26T10:28:39,598 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740 2024-11-26T10:28:39,600 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-26T10:28:39,600 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-26T10:28:39,600 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-26T10:28:39,602 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-26T10:28:39,602 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=763895, jitterRate=-0.028657466173171997}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:28:39,603 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-26T10:28:39,603 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732616919591Writing region info on filesystem at 1732616919591Initializing all the Stores at 1732616919592 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616919592Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616919592Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616919592Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616919592Cleaning up temporary data from old regions at 1732616919600 (+8 ms)Running coprocessor post-open hooks at 1732616919603 (+3 ms)Region opened successfully at 1732616919603 2024-11-26T10:28:39,604 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732616919569 2024-11-26T10:28:39,607 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-26T10:28:39,607 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-26T10:28:39,608 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=94eedbb855cf,39993,1732616918801 2024-11-26T10:28:39,609 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 94eedbb855cf,39993,1732616918801, state=OPEN 2024-11-26T10:28:39,613 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-26T10:28:39,613 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-26T10:28:39,613 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=94eedbb855cf,39993,1732616918801 2024-11-26T10:28:39,613 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:28:39,613 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:28:39,616 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-26T10:28:39,616 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=94eedbb855cf,39993,1732616918801 in 198 msec 2024-11-26T10:28:39,618 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-26T10:28:39,618 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 610 msec 2024-11-26T10:28:39,619 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:28:39,619 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-26T10:28:39,621 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-26T10:28:39,621 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=94eedbb855cf,39993,1732616918801, seqNum=-1] 2024-11-26T10:28:39,621 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:28:39,623 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46029, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:28:39,628 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 664 msec 2024-11-26T10:28:39,628 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732616919628, completionTime=-1 2024-11-26T10:28:39,628 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-26T10:28:39,628 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-26T10:28:39,630 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-26T10:28:39,630 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732616979630 2024-11-26T10:28:39,630 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732617039630 2024-11-26T10:28:39,630 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-26T10:28:39,630 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,37987,1732616918757-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:28:39,630 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,37987,1732616918757-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:28:39,630 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,37987,1732616918757-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:28:39,631 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-94eedbb855cf:37987, period=300000, unit=MILLISECONDS is enabled. 
2024-11-26T10:28:39,631 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-26T10:28:39,631 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-26T10:28:39,632 DEBUG [master/94eedbb855cf:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-26T10:28:39,634 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.794sec 2024-11-26T10:28:39,634 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-26T10:28:39,634 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-26T10:28:39,634 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-26T10:28:39,634 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-26T10:28:39,634 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-26T10:28:39,634 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,37987,1732616918757-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-26T10:28:39,634 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,37987,1732616918757-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-26T10:28:39,637 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-26T10:28:39,637 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-26T10:28:39,637 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,37987,1732616918757-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-26T10:28:39,718 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63008d08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:28:39,718 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 94eedbb855cf,37987,-1 for getting cluster id 2024-11-26T10:28:39,718 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-26T10:28:39,719 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cb3aeda6-f116-45f5-a906-29a97b358965' 2024-11-26T10:28:39,720 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-26T10:28:39,720 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cb3aeda6-f116-45f5-a906-29a97b358965" 2024-11-26T10:28:39,720 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17607df8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:28:39,720 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [94eedbb855cf,37987,-1] 2024-11-26T10:28:39,721 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-26T10:28:39,721 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:28:39,722 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33616, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-26T10:28:39,723 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b9f6f01, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:28:39,724 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-26T10:28:39,724 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=94eedbb855cf,39993,1732616918801, seqNum=-1] 2024-11-26T10:28:39,725 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:28:39,726 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49292, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:28:39,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=94eedbb855cf,37987,1732616918757 2024-11-26T10:28:39,728 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:28:39,731 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-26T10:28:39,731 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-26T10:28:39,731 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-26T10:28:39,731 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-26T10:28:39,732 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 94eedbb855cf,37987,1732616918757 2024-11-26T10:28:39,732 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6a774343 2024-11-26T10:28:39,732 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-26T10:28:39,734 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33626, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-26T10:28:39,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37987 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-26T10:28:39,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37987 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-26T10:28:39,735 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37987 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:28:39,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37987 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-26T10:28:39,737 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-26T10:28:39,737 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:28:39,737 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37987 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-26T10:28:39,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37987 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-26T10:28:39,738 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-26T10:28:39,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741835_1011 (size=395) 2024-11-26T10:28:39,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37821 is added to blk_1073741835_1011 (size=395) 2024-11-26T10:28:39,747 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5ccca399e53b484dcf4d57331194c363, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7 2024-11-26T10:28:39,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741836_1012 (size=78) 2024-11-26T10:28:39,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37821 is added to blk_1073741836_1012 (size=78) 2024-11-26T10:28:39,753 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:28:39,753 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 5ccca399e53b484dcf4d57331194c363, disabling compactions & flushes 2024-11-26T10:28:39,753 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363. 2024-11-26T10:28:39,754 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363. 2024-11-26T10:28:39,754 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363. after waiting 0 ms 2024-11-26T10:28:39,754 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363. 2024-11-26T10:28:39,754 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363. 2024-11-26T10:28:39,754 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5ccca399e53b484dcf4d57331194c363: Waiting for close lock at 1732616919753Disabling compacts and flushes for region at 1732616919753Disabling writes for close at 1732616919754 (+1 ms)Writing region close event to WAL at 1732616919754Closed at 1732616919754 2024-11-26T10:28:39,755 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-26T10:28:39,756 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732616919755"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732616919755"}]},"ts":"1732616919755"} 2024-11-26T10:28:39,758 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-26T10:28:39,759 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-26T10:28:39,759 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732616919759"}]},"ts":"1732616919759"} 2024-11-26T10:28:39,761 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-26T10:28:39,761 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=5ccca399e53b484dcf4d57331194c363, ASSIGN}] 2024-11-26T10:28:39,763 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=5ccca399e53b484dcf4d57331194c363, ASSIGN 2024-11-26T10:28:39,764 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=5ccca399e53b484dcf4d57331194c363, ASSIGN; state=OFFLINE, location=94eedbb855cf,39993,1732616918801; forceNewPlan=false, retain=false 2024-11-26T10:28:39,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:39,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:28:39,914 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5ccca399e53b484dcf4d57331194c363, regionState=OPENING, regionLocation=94eedbb855cf,39993,1732616918801 2024-11-26T10:28:39,917 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=5ccca399e53b484dcf4d57331194c363, ASSIGN because future has completed 2024-11-26T10:28:39,917 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5ccca399e53b484dcf4d57331194c363, server=94eedbb855cf,39993,1732616918801}] 2024-11-26T10:28:40,074 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363. 2024-11-26T10:28:40,074 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5ccca399e53b484dcf4d57331194c363, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:28:40,074 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 5ccca399e53b484dcf4d57331194c363 2024-11-26T10:28:40,074 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:28:40,075 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5ccca399e53b484dcf4d57331194c363 2024-11-26T10:28:40,075 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5ccca399e53b484dcf4d57331194c363 2024-11-26T10:28:40,076 INFO [StoreOpener-5ccca399e53b484dcf4d57331194c363-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5ccca399e53b484dcf4d57331194c363 2024-11-26T10:28:40,077 INFO [StoreOpener-5ccca399e53b484dcf4d57331194c363-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5ccca399e53b484dcf4d57331194c363 columnFamilyName info 2024-11-26T10:28:40,077 DEBUG [StoreOpener-5ccca399e53b484dcf4d57331194c363-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:28:40,078 INFO [StoreOpener-5ccca399e53b484dcf4d57331194c363-1 {}] regionserver.HStore(327): Store=5ccca399e53b484dcf4d57331194c363/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:28:40,078 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5ccca399e53b484dcf4d57331194c363 2024-11-26T10:28:40,078 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/default/TestLogRolling-testLogRollOnPipelineRestart/5ccca399e53b484dcf4d57331194c363 2024-11-26T10:28:40,079 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/default/TestLogRolling-testLogRollOnPipelineRestart/5ccca399e53b484dcf4d57331194c363 2024-11-26T10:28:40,079 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5ccca399e53b484dcf4d57331194c363 2024-11-26T10:28:40,079 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5ccca399e53b484dcf4d57331194c363 2024-11-26T10:28:40,081 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5ccca399e53b484dcf4d57331194c363 2024-11-26T10:28:40,083 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/default/TestLogRolling-testLogRollOnPipelineRestart/5ccca399e53b484dcf4d57331194c363/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:28:40,083 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 5ccca399e53b484dcf4d57331194c363; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=788143, jitterRate=0.002176836133003235}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-26T10:28:40,083 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5ccca399e53b484dcf4d57331194c363 2024-11-26T10:28:40,084 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5ccca399e53b484dcf4d57331194c363: Running coprocessor pre-open hook at 1732616920075Writing region info on filesystem at 1732616920075Initializing all the Stores at 1732616920075Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616920075Cleaning up temporary data from old regions at 1732616920079 (+4 ms)Running coprocessor post-open hooks at 1732616920083 (+4 ms)Region opened successfully at 1732616920084 (+1 ms) 2024-11-26T10:28:40,085 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363., pid=6, masterSystemTime=1732616920070 2024-11-26T10:28:40,087 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363. 2024-11-26T10:28:40,088 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363. 2024-11-26T10:28:40,088 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5ccca399e53b484dcf4d57331194c363, regionState=OPEN, openSeqNum=2, regionLocation=94eedbb855cf,39993,1732616918801 2024-11-26T10:28:40,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5ccca399e53b484dcf4d57331194c363, server=94eedbb855cf,39993,1732616918801 because future has completed 2024-11-26T10:28:40,094 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-26T10:28:40,094 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5ccca399e53b484dcf4d57331194c363, server=94eedbb855cf,39993,1732616918801 in 175 msec 2024-11-26T10:28:40,097 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-26T10:28:40,097 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=5ccca399e53b484dcf4d57331194c363, ASSIGN in 333 msec 2024-11-26T10:28:40,098 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-26T10:28:40,098 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732616920098"}]},"ts":"1732616920098"} 2024-11-26T10:28:40,100 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-26T10:28:40,101 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-26T10:28:40,103 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 366 msec 2024-11-26T10:28:40,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:40,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:41,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:41,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:42,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:42,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:43,254 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-26T10:28:43,254 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-26T10:28:43,254 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-26T10:28:43,255 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-26T10:28:43,255 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-26T10:28:43,255 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-26T10:28:43,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:43,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:44,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:44,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:45,093 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-26T10:28:45,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:45,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:45,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:45,111 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:45,111 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:45,111 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:45,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:45,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:45,115 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:45,117 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:28:45,121 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-26T10:28:45,121 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-26T10:28:45,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:45,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:46,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:46,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:47,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:47,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:48,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:48,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:49,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:49,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-26T10:28:49,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37987 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-26T10:28:49,844 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-26T10:28:49,844 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100
2024-11-26T10:28:49,847 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-26T10:28:49,847 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363.
2024-11-26T10:28:49,851 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363., hostname=94eedbb855cf,39993,1732616918801, seqNum=2]
2024-11-26T10:28:50,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:50,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:51,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:51,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-26T10:28:51,854 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616919193
2024-11-26T10:28:51,854 WARN [ResponseProcessor for block BP-1039670754-172.17.0.2-1732616918076:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1039670754-172.17.0.2-1732616918076:blk_1073741834_1010
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-26T10:28:51,854 WARN [ResponseProcessor for block BP-1039670754-172.17.0.2-1732616918076:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1039670754-172.17.0.2-1732616918076:blk_1073741830_1006
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
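The repeating `util.RecoverLeaseFSUtils(258): Failed invocation` warnings earlier in this log come from HBase's lease-recovery helper reflectively invoking `DistributedFileSystem.isFileClosed` roughly once a second to check whether the old WAL file has been closed on HDFS; every probe fails with `java.io.IOException: Filesystem closed` because the `DFSClient` behind that `FileSystem` handle has already been shut down, so the retry loop can never succeed. The following is only a minimal illustrative sketch of that polling pattern, not the `RecoverLeaseFSUtils` implementation; the NameNode URI, WAL path, and timeout are hypothetical values for the example.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryProbeSketch {

  /**
   * Poll until HDFS reports the file closed, or the deadline passes.
   * If the FileSystem's underlying DFSClient has been closed, every
   * isFileClosed() call throws IOException("Filesystem closed"),
   * which is what the repeated WARN records above show.
   */
  static boolean waitForFileClosed(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        boolean recovered = dfs.recoverLease(wal); // ask the NameNode to (re)start lease recovery
        if (recovered || dfs.isFileClosed(wal)) {  // the probe that keeps failing in the log
          return true;
        }
      } catch (java.io.IOException e) {
        // e.g. "Filesystem closed" once the client is shut down; log and retry
        System.err.println("Failed invocation for " + wal + ": " + e.getMessage());
      }
      Thread.sleep(1000L); // matches the roughly 1 s cadence of the WARNs above
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode address and WAL path, for illustration only.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    Path wal = new Path("/hbase/WALs/example-regionserver/example.wal");
    if (fs instanceof DistributedFileSystem) {
      waitForFileClosed((DistributedFileSystem) fs, wal, 60_000L);
    }
    fs.close();
  }
}
```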
2024-11-26T10:28:51,855 WARN [ResponseProcessor for block BP-1039670754-172.17.0.2-1732616918076:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1039670754-172.17.0.2-1732616918076:blk_1073741833_1009
java.io.IOException: Bad response ERROR for BP-1039670754-172.17.0.2-1732616918076:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:37821,DS-03e277ae-f504-4f2c-b4f1-b1273c42ff3b,DISK]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-26T10:28:51,855 WARN [DataStreamer for file /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/WALs/94eedbb855cf,37987,1732616918757/94eedbb855cf%2C37987%2C1732616918757.1732616918894 block BP-1039670754-172.17.0.2-1732616918076:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1039670754-172.17.0.2-1732616918076:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37821,DS-03e277ae-f504-4f2c-b4f1-b1273c42ff3b,DISK], DatanodeInfoWithStorage[127.0.0.1:33257,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37821,DS-03e277ae-f504-4f2c-b4f1-b1273c42ff3b,DISK]) is bad.
2024-11-26T10:28:51,855 WARN [DataStreamer for file /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.meta.1732616919577.meta block BP-1039670754-172.17.0.2-1732616918076:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1039670754-172.17.0.2-1732616918076:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37821,DS-03e277ae-f504-4f2c-b4f1-b1273c42ff3b,DISK], DatanodeInfoWithStorage[127.0.0.1:33257,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37821,DS-03e277ae-f504-4f2c-b4f1-b1273c42ff3b,DISK]) is bad.
2024-11-26T10:28:51,855 WARN [DataStreamer for file /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616919193 block BP-1039670754-172.17.0.2-1732616918076:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1039670754-172.17.0.2-1732616918076:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33257,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK], DatanodeInfoWithStorage[127.0.0.1:37821,DS-03e277ae-f504-4f2c-b4f1-b1273c42ff3b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37821,DS-03e277ae-f504-4f2c-b4f1-b1273c42ff3b,DISK]) is bad.
2024-11-26T10:28:51,855 WARN [PacketResponder: BP-1039670754-172.17.0.2-1732616918076:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37821] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Broken pipe
    at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
    at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
    at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
    at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:51,856 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2108836593_22 at /127.0.0.1:54612 [Receiving block BP-1039670754-172.17.0.2-1732616918076:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37821:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54612 dst: /127.0.0.1:37821 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
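The `ResponseProcessor` EOFs, the `Error Recovery for ... datanode ... is bad.` messages, and the datanode-side `DataXceiver error processing WRITE_BLOCK` traces around this point are the client and datanode views of the same event: a write pipeline for an open WAL block losing a datanode, which is the pipeline-restart scenario this test drives. From the writer's side this surfaces as an exception out of the output stream, and the HDFS client decides, per its replace-datanode-on-failure policy, whether to rebuild the pipeline. Below is a rough sketch of such a writer, with hypothetical paths, URI, and configuration values (they are not what this test configures); the two configuration keys are standard HDFS client settings.

```java
import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PipelineFailureWriterSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Client-side pipeline-recovery knobs; values here are illustrative.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");

    // Hypothetical NameNode and file; the WAL paths in the log are test-run specific.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    Path file = new Path("/tmp/example-wal-like-file");

    try (FSDataOutputStream out = fs.create(file, true)) {
      for (int i = 0; i < 100; i++) {
        out.write(("edit-" + i + "\n").getBytes("UTF-8"));
        try {
          // hflush() pushes the packet through the datanode pipeline; if a
          // datanode died, the DataStreamer notices here (EOF / bad response),
          // logs "Error Recovery for <block> ... datanode N(...) is bad." and
          // tries to rebuild the pipeline before the call returns or fails.
          out.hflush();
        } catch (IOException e) {
          // If recovery cannot produce a healthy pipeline, the stream is dead;
          // an HBase WAL would typically roll to a new writer at this point.
          System.err.println("pipeline failed: " + e.getMessage());
          break;
        }
      }
    } finally {
      fs.close();
    }
  }
}
```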
2024-11-26T10:28:51,856 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1767107655_22 at /127.0.0.1:54646 [Receiving block BP-1039670754-172.17.0.2-1732616918076:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37821:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54646 dst: /127.0.0.1:37821 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:51,856 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2108836593_22 at /127.0.0.1:43694 [Receiving block BP-1039670754-172.17.0.2-1732616918076:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33257:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43694 dst: /127.0.0.1:33257 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:51,856 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1767107655_22 at /127.0.0.1:43724 [Receiving block BP-1039670754-172.17.0.2-1732616918076:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33257:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43724 dst: /127.0.0.1:33257 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:51,856 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1767107655_22 at /127.0.0.1:43722 [Receiving block BP-1039670754-172.17.0.2-1732616918076:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33257:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43722 dst: /127.0.0.1:33257 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:51,856 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1767107655_22 at /127.0.0.1:54636 [Receiving block BP-1039670754-172.17.0.2-1732616918076:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37821:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54636 dst: /127.0.0.1:37821 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:51,858 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@144c75a2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:51,858 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@37b7928{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:28:51,858 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:28:51,859 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@482b646b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:28:51,859 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@650740c5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/hadoop.log.dir/,STOPPED} 2024-11-26T10:28:51,860 WARN [BP-1039670754-172.17.0.2-1732616918076 heartbeating to localhost/127.0.0.1:46631 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:28:51,861 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-26T10:28:51,861 WARN [BP-1039670754-172.17.0.2-1732616918076 heartbeating to localhost/127.0.0.1:46631 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1039670754-172.17.0.2-1732616918076 (Datanode Uuid 7550d28f-0df4-40a2-bbdb-7d274374c671) service to localhost/127.0.0.1:46631 2024-11-26T10:28:51,861 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:28:51,861 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/data/data3/current/BP-1039670754-172.17.0.2-1732616918076 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:51,861 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/data/data4/current/BP-1039670754-172.17.0.2-1732616918076 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:51,862 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:28:51,874 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:28:51,877 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:28:51,878 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:28:51,878 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:28:51,878 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-26T10:28:51,878 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c23b3ce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:28:51,879 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@487eadc3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:28:51,992 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30b55012{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/java.io.tmpdir/jetty-localhost-43631-hadoop-hdfs-3_4_1-tests_jar-_-any-4979409513595657755/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:51,992 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@371cd9e1{HTTP/1.1, (http/1.1)}{localhost:43631} 2024-11-26T10:28:51,992 INFO [Time-limited test {}] server.Server(415): Started @164140ms 2024-11-26T10:28:51,994 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-26T10:28:52,013 WARN [ResponseProcessor for block BP-1039670754-172.17.0.2-1732616918076:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1039670754-172.17.0.2-1732616918076:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:52,013 WARN [ResponseProcessor for block BP-1039670754-172.17.0.2-1732616918076:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1039670754-172.17.0.2-1732616918076:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:52,013 WARN [ResponseProcessor for block BP-1039670754-172.17.0.2-1732616918076:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1039670754-172.17.0.2-1732616918076:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:52,014 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1767107655_22 at /127.0.0.1:42586 [Receiving block BP-1039670754-172.17.0.2-1732616918076:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33257:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42586 dst: /127.0.0.1:33257 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:28:52,014 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1767107655_22 at /127.0.0.1:42576 [Receiving block BP-1039670754-172.17.0.2-1732616918076:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33257:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42576 dst: /127.0.0.1:33257 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:52,014 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2108836593_22 at /127.0.0.1:42574 [Receiving block BP-1039670754-172.17.0.2-1732616918076:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33257:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42574 dst: /127.0.0.1:33257 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:52,019 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@119a3311{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:52,019 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@168478e8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:28:52,019 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:28:52,019 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bd771df{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:28:52,020 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@109832d2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/hadoop.log.dir/,STOPPED} 2024-11-26T10:28:52,021 WARN [BP-1039670754-172.17.0.2-1732616918076 heartbeating to localhost/127.0.0.1:46631 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:28:52,021 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-26T10:28:52,021 WARN [BP-1039670754-172.17.0.2-1732616918076 heartbeating to localhost/127.0.0.1:46631 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1039670754-172.17.0.2-1732616918076 (Datanode Uuid 3d0812ea-202c-4415-8158-862e43e0ced4) service to localhost/127.0.0.1:46631 2024-11-26T10:28:52,021 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:28:52,022 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/data/data1/current/BP-1039670754-172.17.0.2-1732616918076 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:52,022 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/data/data2/current/BP-1039670754-172.17.0.2-1732616918076 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:52,022 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:28:52,033 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:28:52,037 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:28:52,038 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:28:52,038 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:28:52,038 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-26T10:28:52,038 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b68c165{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:28:52,038 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b408bc7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:28:52,094 WARN [Thread-1342 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-26T10:28:52,097 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdd2e332f3b514b4a with lease ID 0x5e40b9f250ab4b45: from storage DS-03e277ae-f504-4f2c-b4f1-b1273c42ff3b node DatanodeRegistration(127.0.0.1:32989, datanodeUuid=7550d28f-0df4-40a2-bbdb-7d274374c671, infoPort=39455, infoSecurePort=0, ipcPort=38801, storageInfo=lv=-57;cid=testClusterID;nsid=34050987;c=1732616918076), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:28:52,097 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdd2e332f3b514b4a with lease ID 0x5e40b9f250ab4b45: from storage DS-569afac8-182c-4c39-bab7-784b9acd8ebd node DatanodeRegistration(127.0.0.1:32989, datanodeUuid=7550d28f-0df4-40a2-bbdb-7d274374c671, infoPort=39455, infoSecurePort=0, ipcPort=38801, storageInfo=lv=-57;cid=testClusterID;nsid=34050987;c=1732616918076), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:28:52,154 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5282eca5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/java.io.tmpdir/jetty-localhost-42925-hadoop-hdfs-3_4_1-tests_jar-_-any-7877404294016208050/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:52,155 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@156f820b{HTTP/1.1, (http/1.1)}{localhost:42925} 2024-11-26T10:28:52,155 INFO [Time-limited test {}] server.Server(415): Started @164302ms 2024-11-26T10:28:52,156 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-26T10:28:52,264 WARN [Thread-1373 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-26T10:28:52,266 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd96257a7fe4c125b with lease ID 0x5e40b9f250ab4b46: from storage DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea node DatanodeRegistration(127.0.0.1:45161, datanodeUuid=3d0812ea-202c-4415-8158-862e43e0ced4, infoPort=38105, infoSecurePort=0, ipcPort=36177, storageInfo=lv=-57;cid=testClusterID;nsid=34050987;c=1732616918076), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:28:52,267 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd96257a7fe4c125b with lease ID 0x5e40b9f250ab4b46: from storage DS-e518e725-eeb2-4087-9263-624bbafa80fb node DatanodeRegistration(127.0.0.1:45161, datanodeUuid=3d0812ea-202c-4415-8158-862e43e0ced4, infoPort=38105, infoSecurePort=0, ipcPort=36177, storageInfo=lv=-57;cid=testClusterID;nsid=34050987;c=1732616918076), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:28:52,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:28:52,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:53,174 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-26T10:28:53,178 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-26T10:28:53,179 ERROR [FSHLog-0-hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7-prefix:94eedbb855cf,39993,1732616918801 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33257,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:53,179 WARN [FSHLog-0-hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7-prefix:94eedbb855cf,39993,1732616918801 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33257,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:53,180 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 94eedbb855cf%2C39993%2C1732616918801:(num 1732616919193) roll requested 2024-11-26T10:28:53,180 INFO [regionserver/94eedbb855cf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C39993%2C1732616918801.1732616933180 2024-11-26T10:28:53,185 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616919193 newFile=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616933180 2024-11-26T10:28:53,185 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:53,185 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:53,185 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:53,186 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:53,186 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:53,186 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616919193 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616933180 2024-11-26T10:28:53,186 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33257,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:53,186 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33257,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:53,186 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616919193 2024-11-26T10:28:53,187 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39455:39455),(127.0.0.1/127.0.0.1:38105:38105)] 2024-11-26T10:28:53,187 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616919193 is not closed yet, will try archiving it next time 2024-11-26T10:28:53,187 WARN [IPC Server handler 4 on default port 46631 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616919193 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-11-26T10:28:53,187 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616919193 after 1ms 2024-11-26T10:28:53,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:53,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:28:54,096 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-26T10:28:54,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:54,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:55,190 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-26T10:28:55,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:55,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:56,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:56,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:57,188 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616919193 after 4002ms 2024-11-26T10:28:57,193 WARN [ResponseProcessor for block BP-1039670754-172.17.0.2-1732616918076:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1039670754-172.17.0.2-1732616918076:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1039670754-172.17.0.2-1732616918076:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:45161,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:57,194 WARN [DataStreamer for file /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616933180 block BP-1039670754-172.17.0.2-1732616918076:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1039670754-172.17.0.2-1732616918076:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32989,DS-03e277ae-f504-4f2c-b4f1-b1273c42ff3b,DISK], DatanodeInfoWithStorage[127.0.0.1:45161,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45161,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK]) is bad. 
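The repeated util.RecoverLeaseFSUtils(258) "Failed invocation" warnings above all share one shape: the stack frames show a reflective java.lang.reflect.Method.invoke call into DistributedFileSystem.isFileClosed, and the real failure (java.io.IOException: Filesystem closed, thrown because the DFSClient behind that FileSystem has already been shut down) is carried as the cause of the InvocationTargetException. A minimal sketch of that reflective probe, assuming only the public Hadoop FileSystem/Path API and not the actual HBase implementation:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch only: probe isFileClosed reflectively, the way the warnings above arise.
    // When the underlying client is already closed, the target method throws
    // java.io.IOException("Filesystem closed") and the reflective call surfaces it
    // as the cause of an InvocationTargetException.
    public final class IsFileClosedProbe {

      private IsFileClosedProbe() {
      }

      static boolean isFileClosed(FileSystem fs, Path path) {
        try {
          Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) isFileClosed.invoke(fs, path);
        } catch (NoSuchMethodException | IllegalAccessException e) {
          return false; // this FileSystem does not expose isFileClosed
        } catch (InvocationTargetException e) {
          // e.getCause() carries the real error, e.g. IOException: Filesystem closed
          return false;
        }
      }
    }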
2024-11-26T10:28:57,194 WARN [PacketResponder: BP-1039670754-172.17.0.2-1732616918076:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:45161] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:28:57,194 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1767107655_22 at /127.0.0.1:34574 [Receiving block BP-1039670754-172.17.0.2-1732616918076:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:32989:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34574 dst: /127.0.0.1:32989 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:28:57,194 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1767107655_22 at /127.0.0.1:41118 [Receiving block BP-1039670754-172.17.0.2-1732616918076:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45161:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41118 dst: /127.0.0.1:45161 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:28:57,195 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5282eca5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:57,196 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@156f820b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:28:57,196 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:28:57,196 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b408bc7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:28:57,196 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b68c165{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/hadoop.log.dir/,STOPPED} 2024-11-26T10:28:57,198 WARN [BP-1039670754-172.17.0.2-1732616918076 heartbeating to localhost/127.0.0.1:46631 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:28:57,198 WARN [BP-1039670754-172.17.0.2-1732616918076 heartbeating to localhost/127.0.0.1:46631 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1039670754-172.17.0.2-1732616918076 (Datanode Uuid 3d0812ea-202c-4415-8158-862e43e0ced4) service to localhost/127.0.0.1:46631 2024-11-26T10:28:57,198 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-26T10:28:57,198 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:28:57,198 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/data/data1/current/BP-1039670754-172.17.0.2-1732616918076 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:57,198 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/data/data2/current/BP-1039670754-172.17.0.2-1732616918076 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:57,199 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:28:57,209 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:28:57,212 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:28:57,212 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:28:57,212 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:28:57,212 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-26T10:28:57,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d151a18{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:28:57,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b944a8d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:28:57,329 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@27f5a072{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/java.io.tmpdir/jetty-localhost-39093-hadoop-hdfs-3_4_1-tests_jar-_-any-14866500795382835038/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:57,329 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3a397072{HTTP/1.1, (http/1.1)}{localhost:39093} 2024-11-26T10:28:57,329 INFO [Time-limited test {}] server.Server(415): Started @169477ms 2024-11-26T10:28:57,331 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-26T10:28:57,348 WARN [ResponseProcessor for block BP-1039670754-172.17.0.2-1732616918076:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1039670754-172.17.0.2-1732616918076:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:57,349 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1767107655_22 at /127.0.0.1:34586 [Receiving block BP-1039670754-172.17.0.2-1732616918076:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:32989:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34586 dst: /127.0.0.1:32989 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:28:57,353 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30b55012{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:57,353 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@371cd9e1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:28:57,353 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:28:57,353 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@487eadc3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:28:57,354 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c23b3ce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/hadoop.log.dir/,STOPPED} 2024-11-26T10:28:57,355 WARN [BP-1039670754-172.17.0.2-1732616918076 heartbeating to localhost/127.0.0.1:46631 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:28:57,355 WARN [BP-1039670754-172.17.0.2-1732616918076 heartbeating to localhost/127.0.0.1:46631 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1039670754-172.17.0.2-1732616918076 (Datanode Uuid 7550d28f-0df4-40a2-bbdb-7d274374c671) service to localhost/127.0.0.1:46631 2024-11-26T10:28:57,355 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-26T10:28:57,355 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:28:57,359 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/data/data3/current/BP-1039670754-172.17.0.2-1732616918076 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:57,359 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/data/data4/current/BP-1039670754-172.17.0.2-1732616918076 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:28:57,360 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:28:57,369 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:28:57,373 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:28:57,374 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:28:57,374 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:28:57,374 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-26T10:28:57,374 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2392cae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:28:57,375 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5aaed393{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:28:57,427 WARN [Thread-1416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-26T10:28:57,430 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa5f23b4938349429 with lease ID 0x5e40b9f250ab4b47: from storage DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea node DatanodeRegistration(127.0.0.1:41559, datanodeUuid=3d0812ea-202c-4415-8158-862e43e0ced4, infoPort=37707, infoSecurePort=0, ipcPort=36591, storageInfo=lv=-57;cid=testClusterID;nsid=34050987;c=1732616918076), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:28:57,430 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa5f23b4938349429 with lease ID 0x5e40b9f250ab4b47: from storage DS-e518e725-eeb2-4087-9263-624bbafa80fb node DatanodeRegistration(127.0.0.1:41559, datanodeUuid=3d0812ea-202c-4415-8158-862e43e0ced4, infoPort=37707, infoSecurePort=0, ipcPort=36591, storageInfo=lv=-57;cid=testClusterID;nsid=34050987;c=1732616918076), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:28:57,490 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6a90fb45{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/java.io.tmpdir/jetty-localhost-41833-hadoop-hdfs-3_4_1-tests_jar-_-any-226876234300251125/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:28:57,490 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1dad3af2{HTTP/1.1, (http/1.1)}{localhost:41833} 2024-11-26T10:28:57,490 INFO [Time-limited test {}] server.Server(415): Started @169638ms 2024-11-26T10:28:57,492 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-26T10:28:57,584 WARN [Thread-1447 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-26T10:28:57,587 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2b360925f50f8558 with lease ID 0x5e40b9f250ab4b48: from storage DS-03e277ae-f504-4f2c-b4f1-b1273c42ff3b node DatanodeRegistration(127.0.0.1:46399, datanodeUuid=7550d28f-0df4-40a2-bbdb-7d274374c671, infoPort=33179, infoSecurePort=0, ipcPort=36309, storageInfo=lv=-57;cid=testClusterID;nsid=34050987;c=1732616918076), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:28:57,587 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2b360925f50f8558 with lease ID 0x5e40b9f250ab4b48: from storage DS-569afac8-182c-4c39-bab7-784b9acd8ebd node DatanodeRegistration(127.0.0.1:46399, datanodeUuid=7550d28f-0df4-40a2-bbdb-7d274374c671, infoPort=33179, infoSecurePort=0, ipcPort=36309, storageInfo=lv=-57;cid=testClusterID;nsid=34050987;c=1732616918076), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:28:57,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:28:57,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:58,510 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-26T10:28:58,512 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-26T10:28:58,513 ERROR [FSHLog-0-hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7-prefix:94eedbb855cf,39993,1732616918801 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32989,DS-03e277ae-f504-4f2c-b4f1-b1273c42ff3b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:58,514 WARN [FSHLog-0-hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7-prefix:94eedbb855cf,39993,1732616918801 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32989,DS-03e277ae-f504-4f2c-b4f1-b1273c42ff3b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:58,514 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 94eedbb855cf%2C39993%2C1732616918801:(num 1732616933180) roll requested 2024-11-26T10:28:58,514 INFO [regionserver/94eedbb855cf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C39993%2C1732616918801.1732616938514 2024-11-26T10:28:58,519 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616933180 newFile=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616938514 2024-11-26T10:28:58,520 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:58,520 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:58,520 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:58,520 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:58,520 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:28:58,520 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616933180 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616938514 2024-11-26T10:28:58,520 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32989,DS-03e277ae-f504-4f2c-b4f1-b1273c42ff3b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:28:58,520 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32989,DS-03e277ae-f504-4f2c-b4f1-b1273c42ff3b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:28:58,520 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616933180 2024-11-26T10:28:58,521 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33179:33179),(127.0.0.1/127.0.0.1:37707:37707)] 2024-11-26T10:28:58,521 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616933180 is not closed yet, will try archiving it next time 2024-11-26T10:28:58,521 WARN [IPC Server handler 2 on default port 46631 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616933180 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-26T10:28:58,521 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616933180 after 1ms 2024-11-26T10:28:58,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:58,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:28:59,429 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-26T10:28:59,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:28:59,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:00,522 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C39993%2C1732616918801.1732616940522 2024-11-26T10:29:00,528 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616938514 newFile=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616940522 2024-11-26T10:29:00,528 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:00,528 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:00,528 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:00,528 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:00,529 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:00,529 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616938514 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616940522 2024-11-26T10:29:00,530 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37707:37707),(127.0.0.1/127.0.0.1:33179:33179)] 2024-11-26T10:29:00,530 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616933180 is not closed yet, will try archiving it next time 2024-11-26T10:29:00,530 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616938514 is not closed yet, will try archiving it next time 2024-11-26T10:29:00,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41559 is added to blk_1073741838_1019 (size=1264) 2024-11-26T10:29:00,530 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616919193 2024-11-26T10:29:00,531 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616919193 2024-11-26T10:29:00,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46399 is added to blk_1073741838_1019 (size=1264) 2024-11-26T10:29:00,531 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616933180 is not closed yet, will try archiving it next time 2024-11-26T10:29:00,531 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, 
attempt=0 on file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616919193 after 0ms 2024-11-26T10:29:00,531 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616919193 2024-11-26T10:29:00,540 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732616920084/Put/vlen=218/seqid=0] 2024-11-26T10:29:00,540 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732616929852/Put/vlen=1045/seqid=0] 2024-11-26T10:29:00,540 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616919193 2024-11-26T10:29:00,540 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616933180 2024-11-26T10:29:00,540 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616933180 2024-11-26T10:29:00,541 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616933180 after 1ms 2024-11-26T10:29:00,541 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616933180 2024-11-26T10:29:00,544 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732616933179/Put/vlen=1045/seqid=0] 2024-11-26T10:29:00,544 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732616935191/Put/vlen=1045/seqid=0] 2024-11-26T10:29:00,544 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616933180 2024-11-26T10:29:00,544 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616938514 2024-11-26T10:29:00,544 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616938514 2024-11-26T10:29:00,544 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616938514 after 0ms 2024-11-26T10:29:00,544 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616938514 2024-11-26T10:29:00,547 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732616938513/Put/vlen=1045/seqid=0] 2024-11-26T10:29:00,547 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616940522 2024-11-26T10:29:00,547 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616940522 2024-11-26T10:29:00,547 WARN [IPC Server handler 0 on default port 46631 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616940522 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-26T10:29:00,548 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616940522 after 1ms 2024-11-26T10:29:00,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:00,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:01,429 WARN [ResponseProcessor for block BP-1039670754-172.17.0.2-1732616918076:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1039670754-172.17.0.2-1732616918076:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:29:01,429 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2108836593_22 at /127.0.0.1:55182 [Receiving block BP-1039670754-172.17.0.2-1732616918076:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:41559:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55182 dst: /127.0.0.1:41559 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:41559 remote=/127.0.0.1:55182]. Total timeout mills is 60000, 59098 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:29:01,430 WARN [DataStreamer for file /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616940522 block BP-1039670754-172.17.0.2-1732616918076:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1039670754-172.17.0.2-1732616918076:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41559,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK], DatanodeInfoWithStorage[127.0.0.1:46399,DS-03e277ae-f504-4f2c-b4f1-b1273c42ff3b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41559,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK]) is bad. 
2024-11-26T10:29:01,430 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2108836593_22 at /127.0.0.1:45476 [Receiving block BP-1039670754-172.17.0.2-1732616918076:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:46399:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45476 dst: /127.0.0.1:46399 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:29:01,431 WARN [DataStreamer for file /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616940522 block BP-1039670754-172.17.0.2-1732616918076:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1039670754-172.17.0.2-1732616918076:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:29:01,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41559 is added to blk_1073741839_1022 (size=85) 2024-11-26T10:29:01,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46399 is added to blk_1073741839_1022 (size=85) 2024-11-26T10:29:01,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:01,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:02,522 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616933180 after 4002ms 2024-11-26T10:29:02,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:02,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:03,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:03,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:04,548 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616940522 after 4001ms 2024-11-26T10:29:04,548 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616940522 2024-11-26T10:29:04,552 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616940522 2024-11-26T10:29:04,553 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-26T10:29:04,553 ERROR [FSHLog-0-hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7-prefix:94eedbb855cf,39993,1732616918801.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33257,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:29:04,553 WARN [FSHLog-0-hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7-prefix:94eedbb855cf,39993,1732616918801.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33257,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:29:04,553 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 94eedbb855cf%2C39993%2C1732616918801.meta:.meta(num 1732616919577) roll requested 2024-11-26T10:29:04,553 INFO [regionserver/94eedbb855cf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C39993%2C1732616918801.meta.1732616944553.meta 2024-11-26T10:29:04,558 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:04,558 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:04,558 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:04,558 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:04,559 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:04,559 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.meta.1732616919577.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.meta.1732616944553.meta 2024-11-26T10:29:04,559 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33257,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:29:04,559 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33257,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:29:04,559 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.meta.1732616919577.meta 2024-11-26T10:29:04,560 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33179:33179),(127.0.0.1/127.0.0.1:37707:37707)] 2024-11-26T10:29:04,560 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.meta.1732616919577.meta is not closed yet, will try archiving it next time 2024-11-26T10:29:04,560 WARN [IPC Server handler 4 on default port 46631 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.meta.1732616919577.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-11-26T10:29:04,560 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.meta.1732616919577.meta after 1ms 2024-11-26T10:29:04,575 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740/.tmp/info/fadeae6b3f8649a5ad214d649e1eabf9 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363./info:regioninfo/1732616920088/Put/seqid=0 2024-11-26T10:29:04,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41559 is added to blk_1073741841_1025 (size=7125) 2024-11-26T10:29:04,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46399 is added to blk_1073741841_1025 (size=7125) 2024-11-26T10:29:04,580 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740/.tmp/info/fadeae6b3f8649a5ad214d649e1eabf9 2024-11-26T10:29:04,600 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740/.tmp/ns/42f42db0f5b744d79e35660c0de1c13f is 43, key is default/ns:d/1732616919623/Put/seqid=0 2024-11-26T10:29:04,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46399 is added to blk_1073741842_1026 (size=5153) 2024-11-26T10:29:04,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41559 is added to blk_1073741842_1026 (size=5153) 2024-11-26T10:29:04,605 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740/.tmp/ns/42f42db0f5b744d79e35660c0de1c13f 2024-11-26T10:29:04,624 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740/.tmp/table/dbbd292ae3204c7d8f76f8c18037c156 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732616920098/Put/seqid=0 2024-11-26T10:29:04,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46399 is added to blk_1073741843_1027 (size=5438) 2024-11-26T10:29:04,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41559 is added to blk_1073741843_1027 (size=5438) 2024-11-26T10:29:04,629 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740/.tmp/table/dbbd292ae3204c7d8f76f8c18037c156 2024-11-26T10:29:04,634 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740/.tmp/info/fadeae6b3f8649a5ad214d649e1eabf9 as hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740/info/fadeae6b3f8649a5ad214d649e1eabf9 2024-11-26T10:29:04,639 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740/info/fadeae6b3f8649a5ad214d649e1eabf9, entries=10, sequenceid=11, filesize=7.0 K 2024-11-26T10:29:04,640 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740/.tmp/ns/42f42db0f5b744d79e35660c0de1c13f as hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740/ns/42f42db0f5b744d79e35660c0de1c13f 2024-11-26T10:29:04,646 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740/ns/42f42db0f5b744d79e35660c0de1c13f, entries=2, sequenceid=11, filesize=5.0 K 2024-11-26T10:29:04,646 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740/.tmp/table/dbbd292ae3204c7d8f76f8c18037c156 as hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740/table/dbbd292ae3204c7d8f76f8c18037c156 2024-11-26T10:29:04,652 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740/table/dbbd292ae3204c7d8f76f8c18037c156, entries=2, sequenceid=11, filesize=5.3 K 2024-11-26T10:29:04,653 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 101ms, sequenceid=11, compaction requested=false 2024-11-26T10:29:04,654 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-26T10:29:04,654 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 5ccca399e53b484dcf4d57331194c363 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-26T10:29:04,654 ERROR [FSHLog-0-hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7-prefix:94eedbb855cf,39993,1732616918801 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1039670754-172.17.0.2-1732616918076:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:29:04,655 WARN [FSHLog-0-hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7-prefix:94eedbb855cf,39993,1732616918801 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1039670754-172.17.0.2-1732616918076:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:29:04,655 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 94eedbb855cf%2C39993%2C1732616918801:(num 1732616940522) roll requested 2024-11-26T10:29:04,655 INFO [regionserver/94eedbb855cf:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C39993%2C1732616918801.1732616944655 2024-11-26T10:29:04,661 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616940522 newFile=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616944655 2024-11-26T10:29:04,661 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:04,661 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:04,661 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:04,661 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:04,661 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:04,662 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616940522 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616944655 2024-11-26T10:29:04,662 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1039670754-172.17.0.2-1732616918076:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:29:04,662 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1039670754-172.17.0.2-1732616918076:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:29:04,663 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616940522 2024-11-26T10:29:04,663 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616940522 after 0ms 2024-11-26T10:29:04,665 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.1732616940522 to hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/oldWALs/94eedbb855cf%2C39993%2C1732616918801.1732616940522 2024-11-26T10:29:04,666 DEBUG [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33179:33179),(127.0.0.1/127.0.0.1:37707:37707)] 2024-11-26T10:29:04,682 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/default/TestLogRolling-testLogRollOnPipelineRestart/5ccca399e53b484dcf4d57331194c363/.tmp/info/47f72d5eb8224074aaa2e5a67eaff8aa is 1080, key is row1002/info:/1732616929852/Put/seqid=0 2024-11-26T10:29:04,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41559 is added to blk_1073741845_1029 (size=9270) 2024-11-26T10:29:04,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46399 is added to blk_1073741845_1029 (size=9270) 2024-11-26T10:29:04,687 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/default/TestLogRolling-testLogRollOnPipelineRestart/5ccca399e53b484dcf4d57331194c363/.tmp/info/47f72d5eb8224074aaa2e5a67eaff8aa 2024-11-26T10:29:04,693 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/default/TestLogRolling-testLogRollOnPipelineRestart/5ccca399e53b484dcf4d57331194c363/.tmp/info/47f72d5eb8224074aaa2e5a67eaff8aa as hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/default/TestLogRolling-testLogRollOnPipelineRestart/5ccca399e53b484dcf4d57331194c363/info/47f72d5eb8224074aaa2e5a67eaff8aa 2024-11-26T10:29:04,697 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/default/TestLogRolling-testLogRollOnPipelineRestart/5ccca399e53b484dcf4d57331194c363/info/47f72d5eb8224074aaa2e5a67eaff8aa, entries=4, sequenceid=8, filesize=9.1 K 2024-11-26T10:29:04,699 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 5ccca399e53b484dcf4d57331194c363 in 44ms, sequenceid=8, compaction requested=false 2024-11-26T10:29:04,699 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 5ccca399e53b484dcf4d57331194c363: 2024-11-26T10:29:04,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-26T10:29:04,703 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-26T10:29:04,703 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:29:04,703 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:29:04,704 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:29:04,704 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-26T10:29:04,704 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-26T10:29:04,704 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1430893487, stopped=false 2024-11-26T10:29:04,704 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=94eedbb855cf,37987,1732616918757 2024-11-26T10:29:04,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-26T10:29:04,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-26T10:29:04,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:29:04,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:29:04,706 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-26T10:29:04,706 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-26T10:29:04,706 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:29:04,706 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:29:04,706 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '94eedbb855cf,39993,1732616918801' ***** 2024-11-26T10:29:04,706 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-26T10:29:04,706 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:29:04,706 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:29:04,707 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-26T10:29:04,707 INFO [RS:0;94eedbb855cf:39993 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-26T10:29:04,707 INFO [RS:0;94eedbb855cf:39993 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-26T10:29:04,707 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.HRegionServer(3091): Received CLOSE for 5ccca399e53b484dcf4d57331194c363 2024-11-26T10:29:04,707 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-26T10:29:04,707 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.HRegionServer(959): stopping server 94eedbb855cf,39993,1732616918801 2024-11-26T10:29:04,707 INFO [RS:0;94eedbb855cf:39993 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-26T10:29:04,707 INFO [RS:0;94eedbb855cf:39993 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;94eedbb855cf:39993. 2024-11-26T10:29:04,707 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5ccca399e53b484dcf4d57331194c363, disabling compactions & flushes 2024-11-26T10:29:04,708 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363. 
2024-11-26T10:29:04,708 DEBUG [RS:0;94eedbb855cf:39993 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:29:04,708 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363. 2024-11-26T10:29:04,708 DEBUG [RS:0;94eedbb855cf:39993 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:29:04,708 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363. after waiting 0 ms 2024-11-26T10:29:04,708 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363. 2024-11-26T10:29:04,708 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-26T10:29:04,708 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-26T10:29:04,708 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-26T10:29:04,708 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-26T10:29:04,708 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-26T10:29:04,708 DEBUG [RS:0;94eedbb855cf:39993 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 5ccca399e53b484dcf4d57331194c363=TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363.} 2024-11-26T10:29:04,708 DEBUG [RS:0;94eedbb855cf:39993 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 5ccca399e53b484dcf4d57331194c363 2024-11-26T10:29:04,708 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-26T10:29:04,708 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-26T10:29:04,709 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-26T10:29:04,709 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-26T10:29:04,709 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-26T10:29:04,716 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/default/TestLogRolling-testLogRollOnPipelineRestart/5ccca399e53b484dcf4d57331194c363/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-26T10:29:04,717 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363. 2024-11-26T10:29:04,717 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5ccca399e53b484dcf4d57331194c363: Waiting for close lock at 1732616944707Running coprocessor pre-close hooks at 1732616944707Disabling compacts and flushes for region at 1732616944707Disabling writes for close at 1732616944708 (+1 ms)Writing region close event to WAL at 1732616944708Running coprocessor post-close hooks at 1732616944717 (+9 ms)Closed at 1732616944717 2024-11-26T10:29:04,717 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732616919734.5ccca399e53b484dcf4d57331194c363. 
2024-11-26T10:29:04,719 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-26T10:29:04,720 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-26T10:29:04,720 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-26T10:29:04,720 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732616944708Running coprocessor pre-close hooks at 1732616944708Disabling compacts and flushes for region at 1732616944708Disabling writes for close at 1732616944709 (+1 ms)Writing region close event to WAL at 1732616944716 (+7 ms)Running coprocessor post-close hooks at 1732616944720 (+4 ms)Closed at 1732616944720 2024-11-26T10:29:04,720 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-26T10:29:04,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:04,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:04,909 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.HRegionServer(976): stopping server 94eedbb855cf,39993,1732616918801; all regions closed. 
2024-11-26T10:29:04,909 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:04,909 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:04,909 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:04,910 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:04,910 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:04,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41559 is added to blk_1073741840_1023 (size=825) 2024-11-26T10:29:04,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46399 is added to blk_1073741840_1023 (size=825) 2024-11-26T10:29:05,059 INFO [regionserver/94eedbb855cf:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-26T10:29:05,141 INFO [regionserver/94eedbb855cf:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-26T10:29:05,141 INFO [regionserver/94eedbb855cf:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-26T10:29:05,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:05,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:06,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:06,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:07,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:07,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:08,561 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.meta.1732616919577.meta after 4002ms 2024-11-26T10:29:08,561 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/WALs/94eedbb855cf,39993,1732616918801/94eedbb855cf%2C39993%2C1732616918801.meta.1732616919577.meta to hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/oldWALs/94eedbb855cf%2C39993%2C1732616918801.meta.1732616919577.meta 2024-11-26T10:29:08,564 DEBUG [RS:0;94eedbb855cf:39993 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/oldWALs 2024-11-26T10:29:08,564 INFO [RS:0;94eedbb855cf:39993 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 94eedbb855cf%2C39993%2C1732616918801.meta:.meta(num 1732616944553) 2024-11-26T10:29:08,565 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:08,565 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:08,565 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:08,565 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:08,565 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:08,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41559 is added to blk_1073741844_1028 (size=1162) 2024-11-26T10:29:08,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46399 is added to blk_1073741844_1028 (size=1162) 2024-11-26T10:29:08,572 DEBUG [RS:0;94eedbb855cf:39993 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/oldWALs 2024-11-26T10:29:08,572 INFO [RS:0;94eedbb855cf:39993 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 94eedbb855cf%2C39993%2C1732616918801:(num 1732616944655) 2024-11-26T10:29:08,572 DEBUG [RS:0;94eedbb855cf:39993 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:29:08,572 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.LeaseManager(133): Closed leases 2024-11-26T10:29:08,572 INFO [RS:0;94eedbb855cf:39993 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-26T10:29:08,572 INFO [RS:0;94eedbb855cf:39993 {}] hbase.ChoreService(370): Chore service for: regionserver/94eedbb855cf:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-26T10:29:08,572 INFO [RS:0;94eedbb855cf:39993 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-26T10:29:08,572 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-26T10:29:08,573 INFO [RS:0;94eedbb855cf:39993 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39993 2024-11-26T10:29:08,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/94eedbb855cf,39993,1732616918801 2024-11-26T10:29:08,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-26T10:29:08,575 INFO [RS:0;94eedbb855cf:39993 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-26T10:29:08,575 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [94eedbb855cf,39993,1732616918801] 2024-11-26T10:29:08,578 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/94eedbb855cf,39993,1732616918801 already deleted, retry=false 2024-11-26T10:29:08,578 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 94eedbb855cf,39993,1732616918801 expired; onlineServers=0 2024-11-26T10:29:08,578 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '94eedbb855cf,37987,1732616918757' ***** 2024-11-26T10:29:08,578 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-26T10:29:08,578 INFO [M:0;94eedbb855cf:37987 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-26T10:29:08,578 INFO [M:0;94eedbb855cf:37987 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-26T10:29:08,579 DEBUG [M:0;94eedbb855cf:37987 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-26T10:29:08,579 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-26T10:29:08,579 DEBUG [M:0;94eedbb855cf:37987 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-26T10:29:08,579 DEBUG [master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732616918971 {}] cleaner.HFileCleaner(306): Exit Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732616918971,5,FailOnTimeoutGroup] 2024-11-26T10:29:08,579 INFO [M:0;94eedbb855cf:37987 {}] hbase.ChoreService(370): Chore service for: master/94eedbb855cf:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-26T10:29:08,579 INFO [M:0;94eedbb855cf:37987 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-26T10:29:08,579 DEBUG [M:0;94eedbb855cf:37987 {}] master.HMaster(1795): Stopping service threads 2024-11-26T10:29:08,579 INFO [M:0;94eedbb855cf:37987 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-26T10:29:08,579 INFO [M:0;94eedbb855cf:37987 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-26T10:29:08,579 INFO [M:0;94eedbb855cf:37987 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-26T10:29:08,579 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-26T10:29:08,579 DEBUG [master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732616918970 {}] cleaner.HFileCleaner(306): Exit Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732616918970,5,FailOnTimeoutGroup] 2024-11-26T10:29:08,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-26T10:29:08,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:29:08,581 DEBUG [M:0;94eedbb855cf:37987 {}] zookeeper.ZKUtil(347): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-26T10:29:08,581 WARN [M:0;94eedbb855cf:37987 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-26T10:29:08,581 INFO [M:0;94eedbb855cf:37987 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/.lastflushedseqids 2024-11-26T10:29:08,587 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-26T10:29:08,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46399 is added to blk_1073741846_1030 (size=120) 2024-11-26T10:29:08,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41559 is added to blk_1073741846_1030 (size=120) 2024-11-26T10:29:08,589 INFO [M:0;94eedbb855cf:37987 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-26T10:29:08,589 INFO [M:0;94eedbb855cf:37987 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-26T10:29:08,589 DEBUG [M:0;94eedbb855cf:37987 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-26T10:29:08,589 INFO [M:0;94eedbb855cf:37987 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:29:08,589 DEBUG [M:0;94eedbb855cf:37987 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:29:08,589 DEBUG [M:0;94eedbb855cf:37987 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-26T10:29:08,589 DEBUG [M:0;94eedbb855cf:37987 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:29:08,589 INFO [M:0;94eedbb855cf:37987 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-11-26T10:29:08,590 ERROR [FSHLog-0-hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData-prefix:94eedbb855cf,37987,1732616918757 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33257,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-26T10:29:08,590 WARN [FSHLog-0-hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData-prefix:94eedbb855cf,37987,1732616918757 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33257,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:29:08,590 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 94eedbb855cf%2C37987%2C1732616918757:(num 1732616918894) roll requested 2024-11-26T10:29:08,591 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C37987%2C1732616918757.1732616948590 2024-11-26T10:29:08,596 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:08,596 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:08,597 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:08,597 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:08,597 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:08,597 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/WALs/94eedbb855cf,37987,1732616918757/94eedbb855cf%2C37987%2C1732616918757.1732616918894 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/WALs/94eedbb855cf,37987,1732616918757/94eedbb855cf%2C37987%2C1732616918757.1732616948590 2024-11-26T10:29:08,597 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33257,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:29:08,598 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33257,DS-a65268c2-6dda-437f-a549-37c2d8b9c0ea,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-26T10:29:08,598 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/WALs/94eedbb855cf,37987,1732616918757/94eedbb855cf%2C37987%2C1732616918757.1732616918894 2024-11-26T10:29:08,598 WARN [IPC Server handler 3 on default port 46631 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/WALs/94eedbb855cf,37987,1732616918757/94eedbb855cf%2C37987%2C1732616918757.1732616918894 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-26T10:29:08,598 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/WALs/94eedbb855cf,37987,1732616918757/94eedbb855cf%2C37987%2C1732616918757.1732616918894 after 0ms 2024-11-26T10:29:08,603 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37707:37707),(127.0.0.1/127.0.0.1:33179:33179)] 2024-11-26T10:29:08,603 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/WALs/94eedbb855cf,37987,1732616918757/94eedbb855cf%2C37987%2C1732616918757.1732616918894 is not closed yet, will try archiving it next time 2024-11-26T10:29:08,623 DEBUG [M:0;94eedbb855cf:37987 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cfad2933ac8747d0887887ef163e063e is 82, key is hbase:meta,,1/info:regioninfo/1732616919607/Put/seqid=0 2024-11-26T10:29:08,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41559 is added to blk_1073741848_1033 (size=5672) 2024-11-26T10:29:08,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46399 is added to blk_1073741848_1033 (size=5672) 2024-11-26T10:29:08,628 INFO [M:0;94eedbb855cf:37987 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cfad2933ac8747d0887887ef163e063e 2024-11-26T10:29:08,655 DEBUG [M:0;94eedbb855cf:37987 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ae2ab98bff7e423383fa690dc962a218 is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732616920102/Put/seqid=0 2024-11-26T10:29:08,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41559 is added to blk_1073741849_1034 (size=6117) 2024-11-26T10:29:08,661 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46399 is added to blk_1073741849_1034 (size=6117) 2024-11-26T10:29:08,661 INFO [M:0;94eedbb855cf:37987 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ae2ab98bff7e423383fa690dc962a218 2024-11-26T10:29:08,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:29:08,676 INFO [RS:0;94eedbb855cf:39993 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-26T10:29:08,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39993-0x10153d201660001, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:29:08,677 INFO [RS:0;94eedbb855cf:39993 {}] regionserver.HRegionServer(1031): Exiting; stopping=94eedbb855cf,39993,1732616918801; zookeeper connection closed. 2024-11-26T10:29:08,677 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@68a2d8d4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@68a2d8d4 2024-11-26T10:29:08,677 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-26T10:29:08,682 DEBUG [M:0;94eedbb855cf:37987 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ac1b53adb3e9427896dfb748015ed65d is 69, key is 94eedbb855cf,39993,1732616918801/rs:state/1732616919045/Put/seqid=0 2024-11-26T10:29:08,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46399 is added to blk_1073741850_1035 (size=5156) 2024-11-26T10:29:08,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41559 is added to blk_1073741850_1035 (size=5156) 2024-11-26T10:29:08,688 INFO [M:0;94eedbb855cf:37987 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ac1b53adb3e9427896dfb748015ed65d 2024-11-26T10:29:08,716 DEBUG [M:0;94eedbb855cf:37987 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/edaa146e689041489c8e136eb3d06d7f is 52, key is load_balancer_on/state:d/1732616919730/Put/seqid=0 2024-11-26T10:29:08,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41559 is added to blk_1073741851_1036 (size=5056) 2024-11-26T10:29:08,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46399 is added to blk_1073741851_1036 (size=5056) 2024-11-26T10:29:08,722 INFO [M:0;94eedbb855cf:37987 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at 
sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/edaa146e689041489c8e136eb3d06d7f 2024-11-26T10:29:08,727 DEBUG [M:0;94eedbb855cf:37987 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cfad2933ac8747d0887887ef163e063e as hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cfad2933ac8747d0887887ef163e063e 2024-11-26T10:29:08,732 INFO [M:0;94eedbb855cf:37987 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cfad2933ac8747d0887887ef163e063e, entries=8, sequenceid=56, filesize=5.5 K 2024-11-26T10:29:08,733 DEBUG [M:0;94eedbb855cf:37987 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ae2ab98bff7e423383fa690dc962a218 as hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ae2ab98bff7e423383fa690dc962a218 2024-11-26T10:29:08,738 INFO [M:0;94eedbb855cf:37987 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ae2ab98bff7e423383fa690dc962a218, entries=6, sequenceid=56, filesize=6.0 K 2024-11-26T10:29:08,739 DEBUG [M:0;94eedbb855cf:37987 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ac1b53adb3e9427896dfb748015ed65d as hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ac1b53adb3e9427896dfb748015ed65d 2024-11-26T10:29:08,740 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-26T10:29:08,744 INFO [M:0;94eedbb855cf:37987 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ac1b53adb3e9427896dfb748015ed65d, entries=1, sequenceid=56, filesize=5.0 K 2024-11-26T10:29:08,745 DEBUG [M:0;94eedbb855cf:37987 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/edaa146e689041489c8e136eb3d06d7f as hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/edaa146e689041489c8e136eb3d06d7f 2024-11-26T10:29:08,750 INFO [M:0;94eedbb855cf:37987 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/edaa146e689041489c8e136eb3d06d7f, entries=1, sequenceid=56, filesize=4.9 K 2024-11-26T10:29:08,751 INFO [M:0;94eedbb855cf:37987 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 162ms, sequenceid=56, compaction requested=false 2024-11-26T10:29:08,753 INFO [M:0;94eedbb855cf:37987 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:29:08,753 DEBUG [M:0;94eedbb855cf:37987 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732616948589Disabling compacts and flushes for region at 1732616948589Disabling writes for close at 1732616948589Obtaining lock to block concurrent updates at 1732616948589Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732616948590 (+1 ms)Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1732616948590Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732616948604 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732616948604Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732616948622 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732616948622Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732616948634 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732616948654 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732616948655 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732616948667 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732616948682 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732616948682Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732616948694 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732616948715 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732616948715Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43163e12: reopening flushed file at 1732616948726 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35854acf: reopening flushed file at 1732616948732 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@772ab088: reopening flushed file at 1732616948738 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@113cf166: reopening flushed file at 1732616948744 (+6 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 162ms, sequenceid=56, compaction requested=false at 1732616948751 (+7 ms)Writing region close event to WAL at 1732616948752 (+1 ms)Closed at 1732616948752 2024-11-26T10:29:08,753 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:08,753 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:08,753 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:08,753 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:08,753 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:29:08,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46399 is added to blk_1073741847_1031 (size=757) 2024-11-26T10:29:08,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41559 is added to blk_1073741847_1031 (size=757) 2024-11-26T10:29:08,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:08,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:09,718 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:09,718 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:09,734 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:09,734 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:09,734 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:09,734 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:09,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:09,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:09,737 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:09,737 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:09,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:09,739 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:09,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:09,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:09,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:09,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:10,245 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-26T10:29:10,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:10,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:10,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:10,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:10,262 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:10,262 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:10,262 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:10,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:10,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:10,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:10,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:10,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:10,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:10,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:10,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:10,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:11,587 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-26T10:29:11,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:11,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:12,599 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/WALs/94eedbb855cf,37987,1732616918757/94eedbb855cf%2C37987%2C1732616918757.1732616918894 after 4001ms 2024-11-26T10:29:12,600 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/WALs/94eedbb855cf,37987,1732616918757/94eedbb855cf%2C37987%2C1732616918757.1732616918894 to hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/oldWALs/94eedbb855cf%2C37987%2C1732616918757.1732616918894 2024-11-26T10:29:12,602 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/MasterData/oldWALs/94eedbb855cf%2C37987%2C1732616918757.1732616918894 to hdfs://localhost:46631/user/jenkins/test-data/ae912044-34eb-d469-75a6-763922f894e7/oldWALs/94eedbb855cf%2C37987%2C1732616918757.1732616918894$masterlocalwal$ 2024-11-26T10:29:12,602 INFO [M:0;94eedbb855cf:37987 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-26T10:29:12,602 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-26T10:29:12,603 INFO [M:0;94eedbb855cf:37987 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37987 2024-11-26T10:29:12,603 INFO [M:0;94eedbb855cf:37987 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-26T10:29:12,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:29:12,705 INFO [M:0;94eedbb855cf:37987 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-26T10:29:12,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x10153d201660000, quorum=127.0.0.1:51786, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:29:12,707 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6a90fb45{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:29:12,707 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1dad3af2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:29:12,707 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:29:12,708 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5aaed393{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:29:12,708 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2392cae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/hadoop.log.dir/,STOPPED} 2024-11-26T10:29:12,709 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-26T10:29:12,709 WARN [BP-1039670754-172.17.0.2-1732616918076 heartbeating to localhost/127.0.0.1:46631 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:29:12,709 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:29:12,709 WARN [BP-1039670754-172.17.0.2-1732616918076 heartbeating to localhost/127.0.0.1:46631 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1039670754-172.17.0.2-1732616918076 (Datanode Uuid 7550d28f-0df4-40a2-bbdb-7d274374c671) service to localhost/127.0.0.1:46631 2024-11-26T10:29:12,710 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/data/data3/current/BP-1039670754-172.17.0.2-1732616918076 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:29:12,710 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/data/data4/current/BP-1039670754-172.17.0.2-1732616918076 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:29:12,710 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:29:12,712 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@27f5a072{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:29:12,712 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3a397072{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:29:12,712 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:29:12,712 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b944a8d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:29:12,712 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d151a18{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/hadoop.log.dir/,STOPPED} 2024-11-26T10:29:12,714 WARN [BP-1039670754-172.17.0.2-1732616918076 heartbeating to localhost/127.0.0.1:46631 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:29:12,714 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-26T10:29:12,714 WARN [BP-1039670754-172.17.0.2-1732616918076 heartbeating to localhost/127.0.0.1:46631 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1039670754-172.17.0.2-1732616918076 (Datanode Uuid 3d0812ea-202c-4415-8158-862e43e0ced4) service to localhost/127.0.0.1:46631 2024-11-26T10:29:12,714 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:29:12,714 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/data/data1/current/BP-1039670754-172.17.0.2-1732616918076 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:29:12,714 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/cluster_6cd46246-a026-bd55-6691-b9d54fce734f/data/data2/current/BP-1039670754-172.17.0.2-1732616918076 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:29:12,715 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:29:12,720 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5566be26{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-26T10:29:12,721 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6d60493b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:29:12,721 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:29:12,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a8be7bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:29:12,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a0844a7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/hadoop.log.dir/,STOPPED} 2024-11-26T10:29:12,729 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-26T10:29:12,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-26T10:29:12,756 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=181 (was 156) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46631 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46631 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46631 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46631 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46631 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:46631 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46631 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46631 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=71 (was 60) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=6261 (was 6607) 2024-11-26T10:29:12,764 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=71, ProcessCount=11, AvailableMemoryMB=6268 2024-11-26T10:29:12,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-26T10:29:12,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/hadoop.log.dir so I do NOT create it in target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17 2024-11-26T10:29:12,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3985f0ec-7c18-520f-8816-1b509651ca20/hadoop.tmp.dir so I do NOT create it in target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17 2024-11-26T10:29:12,765 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/cluster_a4e1dc8b-dc03-fb22-bd40-f93db92a2a9c, deleteOnExit=true 2024-11-26T10:29:12,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-26T10:29:12,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/test.cache.data in system properties and HBase conf 2024-11-26T10:29:12,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/hadoop.tmp.dir in system properties and HBase conf 2024-11-26T10:29:12,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/hadoop.log.dir in system properties and HBase conf 2024-11-26T10:29:12,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-26T10:29:12,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-26T10:29:12,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-26T10:29:12,765 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-26T10:29:12,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-26T10:29:12,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-26T10:29:12,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-26T10:29:12,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-26T10:29:12,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-26T10:29:12,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-26T10:29:12,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-26T10:29:12,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-26T10:29:12,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-26T10:29:12,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/nfs.dump.dir in system properties and HBase conf 2024-11-26T10:29:12,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/java.io.tmpdir in system properties and HBase conf 2024-11-26T10:29:12,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-26T10:29:12,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-26T10:29:12,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-26T10:29:12,779 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-26T10:29:12,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:12,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:12,849 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:29:12,853 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:29:12,854 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:29:12,854 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:29:12,854 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-26T10:29:12,855 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:29:12,855 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@701842fe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:29:12,855 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40e0483f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:29:12,970 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b4e4fbe{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/java.io.tmpdir/jetty-localhost-42597-hadoop-hdfs-3_4_1-tests_jar-_-any-11558774588782192946/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-26T10:29:12,970 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@18baee58{HTTP/1.1, (http/1.1)}{localhost:42597} 2024-11-26T10:29:12,970 INFO [Time-limited test {}] server.Server(415): Started @185118ms 2024-11-26T10:29:12,983 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-26T10:29:13,039 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:29:13,042 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:29:13,043 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:29:13,043 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:29:13,043 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-26T10:29:13,043 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e7fac47{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:29:13,044 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59a5f2c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:29:13,157 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b24fbcd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/java.io.tmpdir/jetty-localhost-35371-hadoop-hdfs-3_4_1-tests_jar-_-any-2762741097582363735/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:29:13,158 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21bca0b2{HTTP/1.1, (http/1.1)}{localhost:35371} 2024-11-26T10:29:13,158 INFO [Time-limited test {}] server.Server(415): Started @185305ms 2024-11-26T10:29:13,159 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-26T10:29:13,189 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:29:13,192 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:29:13,193 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:29:13,193 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:29:13,193 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-26T10:29:13,193 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b5fa12b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:29:13,193 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cac9c50{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:29:13,246 WARN [Thread-1641 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/cluster_a4e1dc8b-dc03-fb22-bd40-f93db92a2a9c/data/data1/current/BP-1347972047-172.17.0.2-1732616952795/current, will proceed with Du for space computation calculation, 2024-11-26T10:29:13,246 WARN [Thread-1642 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/cluster_a4e1dc8b-dc03-fb22-bd40-f93db92a2a9c/data/data2/current/BP-1347972047-172.17.0.2-1732616952795/current, will proceed with Du for space computation calculation, 2024-11-26T10:29:13,254 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-26T10:29:13,254 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-26T10:29:13,254 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-26T10:29:13,254 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-26T10:29:13,263 WARN [Thread-1620 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-26T10:29:13,266 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb33b21cd79715e6d with lease ID 0x337df4c95b60d6f: Processing first storage report for DS-4e3d9d47-43f4-4deb-a710-faf839a49a5f from datanode DatanodeRegistration(127.0.0.1:33287, datanodeUuid=a90634e4-8f09-4c0f-be14-7428bceef399, infoPort=44667, infoSecurePort=0, ipcPort=46155, storageInfo=lv=-57;cid=testClusterID;nsid=1501649331;c=1732616952795) 2024-11-26T10:29:13,266 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb33b21cd79715e6d with lease ID 0x337df4c95b60d6f: from storage DS-4e3d9d47-43f4-4deb-a710-faf839a49a5f node DatanodeRegistration(127.0.0.1:33287, datanodeUuid=a90634e4-8f09-4c0f-be14-7428bceef399, infoPort=44667, infoSecurePort=0, ipcPort=46155, storageInfo=lv=-57;cid=testClusterID;nsid=1501649331;c=1732616952795), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:29:13,266 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb33b21cd79715e6d with lease ID 0x337df4c95b60d6f: Processing first storage report for DS-88829e76-1c4e-450f-b382-c3875d3d67ca from datanode DatanodeRegistration(127.0.0.1:33287, datanodeUuid=a90634e4-8f09-4c0f-be14-7428bceef399, infoPort=44667, infoSecurePort=0, ipcPort=46155, storageInfo=lv=-57;cid=testClusterID;nsid=1501649331;c=1732616952795) 2024-11-26T10:29:13,266 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb33b21cd79715e6d with lease ID 0x337df4c95b60d6f: from storage DS-88829e76-1c4e-450f-b382-c3875d3d67ca node DatanodeRegistration(127.0.0.1:33287, datanodeUuid=a90634e4-8f09-4c0f-be14-7428bceef399, infoPort=44667, infoSecurePort=0, ipcPort=46155, storageInfo=lv=-57;cid=testClusterID;nsid=1501649331;c=1732616952795), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:29:13,310 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@72ef9fa2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/java.io.tmpdir/jetty-localhost-36979-hadoop-hdfs-3_4_1-tests_jar-_-any-10240618780374920008/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:29:13,311 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@618f0457{HTTP/1.1, (http/1.1)}{localhost:36979} 2024-11-26T10:29:13,311 INFO [Time-limited test {}] server.Server(415): Started @185458ms 2024-11-26T10:29:13,312 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
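The sequence from the ResourceChecker "before:" line onward is a fresh minicluster being started for testCompactionRecordDoesntBlockRolling with one master, one region server and two datanodes, matching the StartMiniClusterOption printed earlier. As a rough sketch of how a test requests that topology, assuming the HBaseTestingUtil and StartMiniClusterOption names taken from the log output (builder method names are inferred from the option's toString and may differ in this branch):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    // Sketch only: mirrors StartMiniClusterOption{numMasters=1, numRegionServers=1,
    // numDataNodes=2, ...} from the log; class and method names assumed from that output.
    public class MiniClusterExample {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .build();
        util.startMiniCluster(option);   // spins up mini ZK, mini DFS and the HBase cluster
        try {
          // ... test logic runs against the minicluster here ...
        } finally {
          util.shutdownMiniCluster();    // produces the "Minicluster is down" line seen above
        }
      }
    }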
2024-11-26T10:29:13,414 WARN [Thread-1667 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/cluster_a4e1dc8b-dc03-fb22-bd40-f93db92a2a9c/data/data3/current/BP-1347972047-172.17.0.2-1732616952795/current, will proceed with Du for space computation calculation, 2024-11-26T10:29:13,414 WARN [Thread-1668 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/cluster_a4e1dc8b-dc03-fb22-bd40-f93db92a2a9c/data/data4/current/BP-1347972047-172.17.0.2-1732616952795/current, will proceed with Du for space computation calculation, 2024-11-26T10:29:13,437 WARN [Thread-1656 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-26T10:29:13,441 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x93bca6aacef8218f with lease ID 0x337df4c95b60d70: Processing first storage report for DS-63bd8cb1-924a-4615-968a-94d61ae4b4de from datanode DatanodeRegistration(127.0.0.1:40985, datanodeUuid=59ed9d1c-efce-4773-8193-f9804574b8bc, infoPort=44549, infoSecurePort=0, ipcPort=41411, storageInfo=lv=-57;cid=testClusterID;nsid=1501649331;c=1732616952795) 2024-11-26T10:29:13,441 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x93bca6aacef8218f with lease ID 0x337df4c95b60d70: from storage DS-63bd8cb1-924a-4615-968a-94d61ae4b4de node DatanodeRegistration(127.0.0.1:40985, datanodeUuid=59ed9d1c-efce-4773-8193-f9804574b8bc, infoPort=44549, infoSecurePort=0, ipcPort=41411, storageInfo=lv=-57;cid=testClusterID;nsid=1501649331;c=1732616952795), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:29:13,441 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x93bca6aacef8218f with lease ID 0x337df4c95b60d70: Processing first storage report for DS-1965a5f6-855c-4eb0-b2ab-306e78c83b5c from datanode DatanodeRegistration(127.0.0.1:40985, datanodeUuid=59ed9d1c-efce-4773-8193-f9804574b8bc, infoPort=44549, infoSecurePort=0, ipcPort=41411, storageInfo=lv=-57;cid=testClusterID;nsid=1501649331;c=1732616952795) 2024-11-26T10:29:13,441 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x93bca6aacef8218f with lease ID 0x337df4c95b60d70: from storage DS-1965a5f6-855c-4eb0-b2ab-306e78c83b5c node DatanodeRegistration(127.0.0.1:40985, datanodeUuid=59ed9d1c-efce-4773-8193-f9804574b8bc, infoPort=44549, infoSecurePort=0, ipcPort=41411, storageInfo=lv=-57;cid=testClusterID;nsid=1501649331;c=1732616952795), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:29:13,537 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17 2024-11-26T10:29:13,540 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/cluster_a4e1dc8b-dc03-fb22-bd40-f93db92a2a9c/zookeeper_0, clientPort=53861, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/cluster_a4e1dc8b-dc03-fb22-bd40-f93db92a2a9c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/cluster_a4e1dc8b-dc03-fb22-bd40-f93db92a2a9c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-26T10:29:13,541 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53861 2024-11-26T10:29:13,542 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:29:13,543 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:29:13,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741825_1001 (size=7) 2024-11-26T10:29:13,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741825_1001 (size=7) 2024-11-26T10:29:13,554 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d with version=8 2024-11-26T10:29:13,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/hbase-staging 2024-11-26T10:29:13,556 INFO [Time-limited test {}] client.ConnectionUtils(128): master/94eedbb855cf:0 server-side Connection retries=45 2024-11-26T10:29:13,556 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:29:13,556 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-26T10:29:13,556 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-26T10:29:13,556 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:29:13,556 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-26T10:29:13,556 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-26T10:29:13,557 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-26T10:29:13,557 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39559 2024-11-26T10:29:13,558 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39559 connecting to ZooKeeper ensemble=127.0.0.1:53861 2024-11-26T10:29:13,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:395590x0, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-26T10:29:13,567 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39559-0x10153d289530000 connected 2024-11-26T10:29:13,585 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:29:13,586 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:29:13,589 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:29:13,589 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d, hbase.cluster.distributed=false 2024-11-26T10:29:13,591 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-26T10:29:13,591 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39559 2024-11-26T10:29:13,592 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39559 2024-11-26T10:29:13,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39559 2024-11-26T10:29:13,600 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39559 2024-11-26T10:29:13,600 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39559 2024-11-26T10:29:13,616 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/94eedbb855cf:0 server-side Connection retries=45 2024-11-26T10:29:13,616 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:29:13,616 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-26T10:29:13,616 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-26T10:29:13,616 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:29:13,616 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-26T10:29:13,616 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-26T10:29:13,616 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-26T10:29:13,617 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46275 2024-11-26T10:29:13,618 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46275 connecting to ZooKeeper ensemble=127.0.0.1:53861 2024-11-26T10:29:13,619 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:29:13,620 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:29:13,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:462750x0, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-26T10:29:13,624 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46275-0x10153d289530001 connected 2024-11-26T10:29:13,624 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:29:13,625 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-26T10:29:13,625 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-26T10:29:13,626 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-26T10:29:13,627 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-26T10:29:13,627 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46275 2024-11-26T10:29:13,627 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46275 2024-11-26T10:29:13,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46275 2024-11-26T10:29:13,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46275 2024-11-26T10:29:13,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46275 
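The startup above (MiniZooKeeperCluster, HDFS, then the master and regionserver RPC servers) is what HBaseTestingUtil drives when a test boots a mini cluster. A minimal sketch of that usage, assuming the HBaseTestingUtil/Connection API of the 3.x line this log comes from (method names may differ between versions, and the handler-count key is only an example of how the handlerCount=3 values could be configured, not necessarily what this test sets):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.client.Connection;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        Configuration conf = util.getConfiguration();
        // Handler counts like the "handlerCount=3" lines above are governed by
        // settings such as this one (whether this test tunes it is not shown here).
        conf.setInt("hbase.regionserver.handler.count", 3);
        util.startMiniCluster();                 // boots ZK, HDFS and HBase as logged
        try (Connection conn = util.getConnection()) {
          System.out.println("cluster id: " + conn.getClusterId());
        }
        util.shutdownMiniCluster();
      }
    }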
2024-11-26T10:29:13,644 DEBUG [M:0;94eedbb855cf:39559 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;94eedbb855cf:39559 2024-11-26T10:29:13,644 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/94eedbb855cf,39559,1732616953556 2024-11-26T10:29:13,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:29:13,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:29:13,646 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/94eedbb855cf,39559,1732616953556 2024-11-26T10:29:13,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-26T10:29:13,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:29:13,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:29:13,649 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-26T10:29:13,649 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/94eedbb855cf,39559,1732616953556 from backup master directory 2024-11-26T10:29:13,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/94eedbb855cf,39559,1732616953556 2024-11-26T10:29:13,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:29:13,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:29:13,652 WARN [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
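Several of the DEBUG lines above are watch notifications ("Received ZooKeeper Event ... NodeCreated/NodeChildrenChanged") delivered to watches that ZKUtil registered earlier, including on znodes that did not exist yet. A small stand-alone sketch of that pattern with the plain ZooKeeper client (not HBase's ZKUtil; the ensemble address is simply the one from this log):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // 127.0.0.1:53861 is the mini-cluster quorum above; any ensemble works.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:53861", 30_000, event -> { });
        Watcher onCreate = (WatchedEvent e) ->
            System.out.println("event " + e.getType() + " on " + e.getPath());
        // Returns null if /hbase/running does not exist yet, but the watch stays
        // armed, so a later NodeCreated event (as seen in the log) is delivered.
        System.out.println("stat = " + zk.exists("/hbase/running", onCreate));
        zk.close();
      }
    }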
2024-11-26T10:29:13,652 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=94eedbb855cf,39559,1732616953556 2024-11-26T10:29:13,656 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/hbase.id] with ID: 2c0c30f2-2a7b-4d4e-af8c-eeb6d46aee32 2024-11-26T10:29:13,656 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/.tmp/hbase.id 2024-11-26T10:29:13,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741826_1002 (size=42) 2024-11-26T10:29:13,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741826_1002 (size=42) 2024-11-26T10:29:13,663 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/.tmp/hbase.id]:[hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/hbase.id] 2024-11-26T10:29:13,673 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:29:13,673 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-26T10:29:13,674 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
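The hbase.id handling above follows the usual write-to-temp-then-rename pattern: the cluster ID is written under .tmp and only then moved to its final name, so readers never observe a half-written file. A sketch of the same pattern with the Hadoop FileSystem API (the paths are illustrative and the UUID is taken from this log purely as a sample value):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/hbase-root/.tmp/hbase.id");   // hypothetical locations
        Path dst = new Path("/hbase-root/hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("2c0c30f2-2a7b-4d4e-af8c-eeb6d46aee32".getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, dst)) {          // single metadata operation on HDFS
          throw new IOException("could not publish " + dst);
        }
      }
    }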
2024-11-26T10:29:13,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:29:13,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:29:13,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741827_1003 (size=196) 2024-11-26T10:29:13,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741827_1003 (size=196) 2024-11-26T10:29:13,683 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:29:13,684 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-26T10:29:13,684 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:29:13,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741828_1004 (size=1189) 2024-11-26T10:29:13,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741828_1004 (size=1189) 2024-11-26T10:29:13,691 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store 2024-11-26T10:29:13,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741829_1005 (size=34) 2024-11-26T10:29:13,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741829_1005 (size=34) 2024-11-26T10:29:13,700 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:29:13,700 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-26T10:29:13,700 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:29:13,700 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:29:13,700 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-26T10:29:13,700 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:29:13,700 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
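The 'master:store' descriptor printed above (families info, proc, rs and state with per-family VERSIONS, BLOOMFILTER, BLOCKSIZE and encoding settings) is built internally by MasterRegion. For reference only, a descriptor with families configured like the first two of these can be assembled through the public client API roughly as follows; the table name is purely hypothetical and this is not the code path the master itself uses:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_store"))             // hypothetical table
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                   // VERSIONS => '3'
                .setInMemory(true)                                   // IN_MEMORY => 'true'
                .setBlocksize(8192)                                  // BLOCKSIZE => 8 KB
                .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)                                   // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)
                .build())
            .build();
        System.out.println(td);
      }
    }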
2024-11-26T10:29:13,700 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732616953700Disabling compacts and flushes for region at 1732616953700Disabling writes for close at 1732616953700Writing region close event to WAL at 1732616953700Closed at 1732616953700 2024-11-26T10:29:13,701 WARN [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/.initializing 2024-11-26T10:29:13,701 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/WALs/94eedbb855cf,39559,1732616953556 2024-11-26T10:29:13,703 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C39559%2C1732616953556, suffix=, logDir=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/WALs/94eedbb855cf,39559,1732616953556, archiveDir=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/oldWALs, maxLogs=10 2024-11-26T10:29:13,703 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C39559%2C1732616953556.1732616953703 2024-11-26T10:29:13,708 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/WALs/94eedbb855cf,39559,1732616953556/94eedbb855cf%2C39559%2C1732616953556.1732616953703 2024-11-26T10:29:13,708 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44549:44549),(127.0.0.1/127.0.0.1:44667:44667)] 2024-11-26T10:29:13,709 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:29:13,709 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:29:13,709 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:29:13,709 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:29:13,711 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:29:13,712 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-26T10:29:13,712 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:29:13,712 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:29:13,712 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:29:13,713 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-26T10:29:13,714 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:29:13,714 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:29:13,714 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:29:13,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-26T10:29:13,715 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:29:13,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:29:13,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:29:13,716 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-26T10:29:13,716 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:29:13,717 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:29:13,717 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:29:13,718 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:29:13,718 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:29:13,719 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:29:13,719 DEBUG [master/94eedbb855cf:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:29:13,720 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-26T10:29:13,721 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:29:13,723 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:29:13,724 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=753403, jitterRate=-0.04199875891208649}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-26T10:29:13,725 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732616953709Initializing all the Stores at 1732616953710 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616953710Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616953710Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616953710Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616953710Cleaning up temporary data from old regions at 1732616953719 (+9 ms)Region opened successfully at 1732616953724 (+5 ms) 2024-11-26T10:29:13,725 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-26T10:29:13,728 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73393262, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=94eedbb855cf/172.17.0.2:0 2024-11-26T10:29:13,729 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-26T10:29:13,729 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-26T10:29:13,729 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-26T10:29:13,729 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-26T10:29:13,730 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-26T10:29:13,730 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-26T10:29:13,730 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-26T10:29:13,732 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-26T10:29:13,733 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-26T10:29:13,734 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-26T10:29:13,735 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-26T10:29:13,735 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-26T10:29:13,737 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-26T10:29:13,737 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-26T10:29:13,738 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-26T10:29:13,740 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-26T10:29:13,740 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-26T10:29:13,742 DEBUG 
[master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-26T10:29:13,743 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-26T10:29:13,744 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-26T10:29:13,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-26T10:29:13,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-26T10:29:13,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:29:13,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:29:13,746 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=94eedbb855cf,39559,1732616953556, sessionid=0x10153d289530000, setting cluster-up flag (Was=false) 2024-11-26T10:29:13,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:29:13,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:29:13,756 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-26T10:29:13,757 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=94eedbb855cf,39559,1732616953556 2024-11-26T10:29:13,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:29:13,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:29:13,765 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-26T10:29:13,765 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=94eedbb855cf,39559,1732616953556 2024-11-26T10:29:13,767 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-26T10:29:13,768 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-26T10:29:13,769 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-26T10:29:13,769 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-26T10:29:13,769 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 94eedbb855cf,39559,1732616953556 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-26T10:29:13,770 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:29:13,770 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:29:13,770 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:29:13,770 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:29:13,770 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/94eedbb855cf:0, corePoolSize=10, maxPoolSize=10 2024-11-26T10:29:13,770 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:29:13,770 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/94eedbb855cf:0, corePoolSize=2, maxPoolSize=2 2024-11-26T10:29:13,770 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/94eedbb855cf:0, corePoolSize=1, 
maxPoolSize=1 2024-11-26T10:29:13,772 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:29:13,772 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-26T10:29:13,773 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:29:13,773 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-26T10:29:13,777 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732616983777 2024-11-26T10:29:13,777 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-26T10:29:13,777 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-26T10:29:13,777 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-26T10:29:13,777 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-26T10:29:13,777 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-26T10:29:13,777 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-26T10:29:13,777 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-26T10:29:13,778 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-26T10:29:13,778 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-26T10:29:13,778 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-26T10:29:13,778 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-26T10:29:13,778 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-26T10:29:13,778 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732616953778,5,FailOnTimeoutGroup] 2024-11-26T10:29:13,779 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732616953778,5,FailOnTimeoutGroup] 2024-11-26T10:29:13,779 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-26T10:29:13,779 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-26T10:29:13,779 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-26T10:29:13,779 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
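The cleaner chores above (LogsCleaner and HFileCleaner at period=600000 ms, plus ReplicationBarrierCleaner and SnapshotCleaner) are periodic tasks run by HBase's own ChoreService/ScheduledChore machinery. As a rough stand-in for the scheduling idea only, not the HBase classes themselves, the equivalent in plain java.util.concurrent looks like this:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
      public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
        Runnable logCleaner = () ->
            System.out.println("scanning oldWALs and deleting expired files...");
        // period=600000, unit=MILLISECONDS, matching the LogsCleaner chore above
        pool.scheduleAtFixedRate(logCleaner, 600_000, 600_000, TimeUnit.MILLISECONDS);
      }
    }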
2024-11-26T10:29:13,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741831_1007 (size=1321) 2024-11-26T10:29:13,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741831_1007 (size=1321) 2024-11-26T10:29:13,782 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-26T10:29:13,783 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d 2024-11-26T10:29:13,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741832_1008 (size=32) 2024-11-26T10:29:13,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741832_1008 (size=32) 2024-11-26T10:29:13,794 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:29:13,796 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-26T10:29:13,798 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-26T10:29:13,798 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:29:13,798 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:29:13,798 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-26T10:29:13,799 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-26T10:29:13,799 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:29:13,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:29:13,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-26T10:29:13,801 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-26T10:29:13,801 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:29:13,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:29:13,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-26T10:29:13,802 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-26T10:29:13,802 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:29:13,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:29:13,803 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-26T10:29:13,804 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740 2024-11-26T10:29:13,804 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740 2024-11-26T10:29:13,805 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-26T10:29:13,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-26T10:29:13,806 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
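Each store above logs its CompactionConfiguration, including "ratio 1.200000" and minFilesToCompact:3. Simplified, and not the exact HBase code, the ratio test behind those numbers says a candidate set of store files is acceptable when no single file exceeds ratio times the combined size of the other files in the set:

    import java.util.List;

    public class RatioCheckSketch {
      static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > ratio * (total - size)) {
            return false;                       // one file dominates the selection
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Three small flushes of roughly equal size easily satisfy ratio=1.2 ...
        System.out.println(filesInRatio(List.of(8L << 20, 8L << 20, 9L << 20), 1.2));
        // ... while one 128 MB file next to two 8 MB files does not.
        System.out.println(filesInRatio(List.of(128L << 20, 8L << 20, 8L << 20), 1.2));
      }
    }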
2024-11-26T10:29:13,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-26T10:29:13,809 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:29:13,809 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773356, jitterRate=-0.016627177596092224}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:29:13,810 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732616953795Initializing all the Stores at 1732616953795Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616953795Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616953796 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616953796Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616953796Cleaning up temporary data from old regions at 1732616953806 (+10 ms)Region opened successfully at 1732616953810 (+4 ms) 2024-11-26T10:29:13,810 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-26T10:29:13,810 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-26T10:29:13,810 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-26T10:29:13,810 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-26T10:29:13,810 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-26T10:29:13,811 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-26T10:29:13,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732616953810Disabling compacts and flushes for region at 1732616953810Disabling writes for close at 1732616953810Writing region 
close event to WAL at 1732616953811 (+1 ms)Closed at 1732616953811 2024-11-26T10:29:13,812 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:29:13,812 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-26T10:29:13,813 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-26T10:29:13,814 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-26T10:29:13,816 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-26T10:29:13,830 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.HRegionServer(746): ClusterId : 2c0c30f2-2a7b-4d4e-af8c-eeb6d46aee32 2024-11-26T10:29:13,830 DEBUG [RS:0;94eedbb855cf:46275 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-26T10:29:13,832 DEBUG [RS:0;94eedbb855cf:46275 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-26T10:29:13,832 DEBUG [RS:0;94eedbb855cf:46275 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-26T10:29:13,834 DEBUG [RS:0;94eedbb855cf:46275 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-26T10:29:13,835 DEBUG [RS:0;94eedbb855cf:46275 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b6aff89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=94eedbb855cf/172.17.0.2:0 2024-11-26T10:29:13,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:13,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:13,852 DEBUG [RS:0;94eedbb855cf:46275 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;94eedbb855cf:46275 2024-11-26T10:29:13,852 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-26T10:29:13,852 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-26T10:29:13,852 DEBUG [RS:0;94eedbb855cf:46275 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-26T10:29:13,853 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.HRegionServer(2659): reportForDuty to master=94eedbb855cf,39559,1732616953556 with port=46275, startcode=1732616953615 2024-11-26T10:29:13,853 DEBUG [RS:0;94eedbb855cf:46275 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-26T10:29:13,855 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57447, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-26T10:29:13,855 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39559 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 94eedbb855cf,46275,1732616953615 2024-11-26T10:29:13,856 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39559 {}] master.ServerManager(517): Registering regionserver=94eedbb855cf,46275,1732616953615 2024-11-26T10:29:13,858 DEBUG [RS:0;94eedbb855cf:46275 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d 2024-11-26T10:29:13,858 DEBUG [RS:0;94eedbb855cf:46275 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36867 2024-11-26T10:29:13,858 DEBUG [RS:0;94eedbb855cf:46275 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-26T10:29:13,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-26T10:29:13,861 DEBUG [RS:0;94eedbb855cf:46275 {}] zookeeper.ZKUtil(111): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/94eedbb855cf,46275,1732616953615 2024-11-26T10:29:13,861 WARN [RS:0;94eedbb855cf:46275 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
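[Annotation] The "Config from master" lines above show the settings the master pushes to the region server during registration: hbase.rootdir, fs.defaultFS, and hbase.master.info.port=-1. A hedged sketch of a client-side Configuration pointing at the same HDFS root follows; the host, port, and path literals are copied from the log, everything else is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ClusterConfigSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Values reported in the registration handshake above.
        conf.set("fs.defaultFS", "hdfs://localhost:36867");
        conf.set("hbase.rootdir",
            "hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d");
        conf.setInt("hbase.master.info.port", -1); // -1 disables the master info web UI
        return conf;
      }
    }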
2024-11-26T10:29:13,861 INFO [RS:0;94eedbb855cf:46275 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:29:13,861 DEBUG [RS:0;94eedbb855cf:46275 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/WALs/94eedbb855cf,46275,1732616953615 2024-11-26T10:29:13,861 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [94eedbb855cf,46275,1732616953615] 2024-11-26T10:29:13,866 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-26T10:29:13,868 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-26T10:29:13,869 INFO [RS:0;94eedbb855cf:46275 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-26T10:29:13,869 INFO [RS:0;94eedbb855cf:46275 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:29:13,872 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-26T10:29:13,873 INFO [RS:0;94eedbb855cf:46275 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-26T10:29:13,873 INFO [RS:0;94eedbb855cf:46275 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-26T10:29:13,873 DEBUG [RS:0;94eedbb855cf:46275 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:29:13,873 DEBUG [RS:0;94eedbb855cf:46275 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:29:13,873 DEBUG [RS:0;94eedbb855cf:46275 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:29:13,873 DEBUG [RS:0;94eedbb855cf:46275 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:29:13,873 DEBUG [RS:0;94eedbb855cf:46275 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:29:13,873 DEBUG [RS:0;94eedbb855cf:46275 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/94eedbb855cf:0, corePoolSize=2, maxPoolSize=2 2024-11-26T10:29:13,873 DEBUG [RS:0;94eedbb855cf:46275 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:29:13,873 DEBUG [RS:0;94eedbb855cf:46275 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:29:13,873 DEBUG [RS:0;94eedbb855cf:46275 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/94eedbb855cf:0, corePoolSize=1, 
maxPoolSize=1 2024-11-26T10:29:13,873 DEBUG [RS:0;94eedbb855cf:46275 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:29:13,874 DEBUG [RS:0;94eedbb855cf:46275 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:29:13,874 DEBUG [RS:0;94eedbb855cf:46275 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:29:13,874 DEBUG [RS:0;94eedbb855cf:46275 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/94eedbb855cf:0, corePoolSize=3, maxPoolSize=3 2024-11-26T10:29:13,874 DEBUG [RS:0;94eedbb855cf:46275 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0, corePoolSize=3, maxPoolSize=3 2024-11-26T10:29:13,875 INFO [RS:0;94eedbb855cf:46275 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-26T10:29:13,875 INFO [RS:0;94eedbb855cf:46275 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-26T10:29:13,875 INFO [RS:0;94eedbb855cf:46275 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:29:13,875 INFO [RS:0;94eedbb855cf:46275 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-26T10:29:13,875 INFO [RS:0;94eedbb855cf:46275 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-26T10:29:13,875 INFO [RS:0;94eedbb855cf:46275 {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,46275,1732616953615-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-26T10:29:13,899 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-26T10:29:13,899 INFO [RS:0;94eedbb855cf:46275 {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,46275,1732616953615-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:29:13,899 INFO [RS:0;94eedbb855cf:46275 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:29:13,899 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.Replication(171): 94eedbb855cf,46275,1732616953615 started 2024-11-26T10:29:13,922 INFO [RS:0;94eedbb855cf:46275 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
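[Annotation] The MemStoreFlusher line earlier in this block reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. 95% of the upper limit (0.95 * 880 MB = 836 MB). Those limits are usually derived from the heap-fraction keys sketched below; the fractions shown are the common defaults and are an assumption, not values read from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Upper bound: fraction of the region server heap usable by all memstores.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark as a fraction of the upper bound; 0.95 * 880 MB = 836 MB,
        // matching the globalMemStoreLimitLowMark reported above.
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        return conf;
      }
    }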
2024-11-26T10:29:13,922 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.HRegionServer(1482): Serving as 94eedbb855cf,46275,1732616953615, RpcServer on 94eedbb855cf/172.17.0.2:46275, sessionid=0x10153d289530001 2024-11-26T10:29:13,922 DEBUG [RS:0;94eedbb855cf:46275 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-26T10:29:13,922 DEBUG [RS:0;94eedbb855cf:46275 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 94eedbb855cf,46275,1732616953615 2024-11-26T10:29:13,922 DEBUG [RS:0;94eedbb855cf:46275 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '94eedbb855cf,46275,1732616953615' 2024-11-26T10:29:13,922 DEBUG [RS:0;94eedbb855cf:46275 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-26T10:29:13,923 DEBUG [RS:0;94eedbb855cf:46275 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-26T10:29:13,924 DEBUG [RS:0;94eedbb855cf:46275 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-26T10:29:13,924 DEBUG [RS:0;94eedbb855cf:46275 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-26T10:29:13,924 DEBUG [RS:0;94eedbb855cf:46275 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 94eedbb855cf,46275,1732616953615 2024-11-26T10:29:13,924 DEBUG [RS:0;94eedbb855cf:46275 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '94eedbb855cf,46275,1732616953615' 2024-11-26T10:29:13,924 DEBUG [RS:0;94eedbb855cf:46275 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-26T10:29:13,924 DEBUG [RS:0;94eedbb855cf:46275 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-26T10:29:13,925 DEBUG [RS:0;94eedbb855cf:46275 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-26T10:29:13,925 INFO [RS:0;94eedbb855cf:46275 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-26T10:29:13,925 INFO [RS:0;94eedbb855cf:46275 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-26T10:29:13,966 WARN [94eedbb855cf:39559 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-26T10:29:14,027 INFO [RS:0;94eedbb855cf:46275 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C46275%2C1732616953615, suffix=, logDir=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/WALs/94eedbb855cf,46275,1732616953615, archiveDir=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/oldWALs, maxLogs=32 2024-11-26T10:29:14,027 INFO [RS:0;94eedbb855cf:46275 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C46275%2C1732616953615.1732616954027 2024-11-26T10:29:14,038 INFO [RS:0;94eedbb855cf:46275 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/WALs/94eedbb855cf,46275,1732616953615/94eedbb855cf%2C46275%2C1732616953615.1732616954027 2024-11-26T10:29:14,039 DEBUG [RS:0;94eedbb855cf:46275 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44549:44549),(127.0.0.1/127.0.0.1:44667:44667)] 2024-11-26T10:29:14,216 DEBUG [94eedbb855cf:39559 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-26T10:29:14,217 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=94eedbb855cf,46275,1732616953615 2024-11-26T10:29:14,218 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 94eedbb855cf,46275,1732616953615, state=OPENING 2024-11-26T10:29:14,220 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-26T10:29:14,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:29:14,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:29:14,222 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-26T10:29:14,222 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:29:14,222 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:29:14,222 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=94eedbb855cf,46275,1732616953615}] 2024-11-26T10:29:14,376 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-26T10:29:14,377 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60249, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-26T10:29:14,381 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-26T10:29:14,381 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:29:14,383 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C46275%2C1732616953615.meta, suffix=.meta, logDir=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/WALs/94eedbb855cf,46275,1732616953615, archiveDir=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/oldWALs, maxLogs=32 2024-11-26T10:29:14,383 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C46275%2C1732616953615.meta.1732616954383.meta 2024-11-26T10:29:14,388 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/WALs/94eedbb855cf,46275,1732616953615/94eedbb855cf%2C46275%2C1732616953615.meta.1732616954383.meta 2024-11-26T10:29:14,389 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44549:44549),(127.0.0.1/127.0.0.1:44667:44667)] 2024-11-26T10:29:14,390 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:29:14,391 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-26T10:29:14,391 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-26T10:29:14,391 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
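[Annotation] The "WAL configuration" entries above show blocksize=256 MB, rollsize=128 MB, and maxLogs=32; the roll size is the block size times the roll multiplier (256 MB * 0.5 = 128 MB). A hedged sketch of the keys that normally produce those numbers follows; it illustrates the relationship rather than reproducing the test's configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // WAL block size; falls back to the HDFS block size when unset.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll once the WAL reaches blocksize * multiplier: 256 MB * 0.5 = 128 MB.
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Cap on the number of WAL files before forcing flushes, as in "maxLogs=32".
        conf.setInt("hbase.regionserver.maxlogs", 32);
        return conf;
      }
    }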
2024-11-26T10:29:14,391 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-26T10:29:14,391 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:29:14,391 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-26T10:29:14,391 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-26T10:29:14,393 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-26T10:29:14,393 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-26T10:29:14,393 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:29:14,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:29:14,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-26T10:29:14,395 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-26T10:29:14,395 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:29:14,395 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:29:14,395 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-26T10:29:14,396 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-26T10:29:14,396 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:29:14,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:29:14,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-26T10:29:14,397 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-26T10:29:14,397 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:29:14,398 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
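[Annotation] The cacheConfig and HStore lines above, together with the descriptor dump that follows, describe the meta 'info' family: 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks, block cache on, prefetch off. A minimal sketch of an equivalent ColumnFamilyDescriptor built with the public client API is shown below; it mirrors the logged attributes and is not how hbase:meta is actually defined internally.

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaInfoFamilySketch {
      public static ColumnFamilyDescriptor build() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8192)                                   // BLOCKSIZE => 8 KB
            .setBlockCacheEnabled(true)                           // cacheDataOnRead=true
            .setPrefetchBlocksOnOpen(false)                       // prefetchOnOpen=false
            .build();
      }
    }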
2024-11-26T10:29:14,398 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-26T10:29:14,399 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740 2024-11-26T10:29:14,400 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740 2024-11-26T10:29:14,401 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-26T10:29:14,401 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-26T10:29:14,402 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-26T10:29:14,403 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-26T10:29:14,404 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=752640, jitterRate=-0.04296962916851044}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:29:14,405 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-26T10:29:14,405 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732616954391Writing region info on filesystem at 1732616954391Initializing all the Stores at 1732616954392 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616954392Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616954392Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616954392Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732616954392Cleaning up temporary data from old regions at 1732616954401 (+9 ms)Running coprocessor post-open hooks at 1732616954405 (+4 ms)Region opened successfully at 1732616954405 2024-11-26T10:29:14,406 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732616954375 2024-11-26T10:29:14,409 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-26T10:29:14,409 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-26T10:29:14,410 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=94eedbb855cf,46275,1732616953615 2024-11-26T10:29:14,411 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 94eedbb855cf,46275,1732616953615, state=OPEN 2024-11-26T10:29:14,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-26T10:29:14,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-26T10:29:14,418 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=94eedbb855cf,46275,1732616953615 2024-11-26T10:29:14,418 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:29:14,418 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:29:14,421 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-26T10:29:14,421 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=94eedbb855cf,46275,1732616953615 in 196 msec 2024-11-26T10:29:14,423 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-26T10:29:14,423 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 608 msec 2024-11-26T10:29:14,424 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:29:14,424 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-26T10:29:14,426 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-26T10:29:14,426 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=94eedbb855cf,46275,1732616953615, seqNum=-1] 2024-11-26T10:29:14,426 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:29:14,427 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55141, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:29:14,433 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 664 msec 2024-11-26T10:29:14,433 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732616954433, completionTime=-1 2024-11-26T10:29:14,433 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-26T10:29:14,434 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-26T10:29:14,435 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-26T10:29:14,435 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732617014435 2024-11-26T10:29:14,435 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732617074435 2024-11-26T10:29:14,435 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-26T10:29:14,436 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,39559,1732616953556-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:29:14,436 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,39559,1732616953556-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:29:14,436 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,39559,1732616953556-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:29:14,436 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-94eedbb855cf:39559, period=300000, unit=MILLISECONDS is enabled. 
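[Annotation] InitMetaProcedure above creates the built-in 'default' and 'hbase' namespaces as part of meta initialization. That happens inside the master, but the equivalent operation through the public client API looks roughly like the hypothetical helper below; user code would normally only create additional namespaces, since these two already exist.

    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.NamespaceNotFoundException;
    import org.apache.hadoop.hbase.client.Admin;

    public class NamespaceSketch {
      // Hypothetical helper: creates a namespace only if the cluster does not have it yet.
      public static void ensureNamespace(Admin admin, String name) throws Exception {
        try {
          admin.getNamespaceDescriptor(name);  // throws if the namespace is missing
        } catch (NamespaceNotFoundException e) {
          admin.createNamespace(NamespaceDescriptor.create(name).build());
        }
      }
    }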
2024-11-26T10:29:14,436 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-26T10:29:14,436 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-26T10:29:14,438 DEBUG [master/94eedbb855cf:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-26T10:29:14,439 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.786sec 2024-11-26T10:29:14,439 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-26T10:29:14,439 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-26T10:29:14,439 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-26T10:29:14,439 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-26T10:29:14,439 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-26T10:29:14,439 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,39559,1732616953556-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-26T10:29:14,440 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,39559,1732616953556-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-26T10:29:14,442 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-26T10:29:14,442 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-26T10:29:14,442 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,39559,1732616953556-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-26T10:29:14,530 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34fe4f6c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:29:14,530 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 94eedbb855cf,39559,-1 for getting cluster id 2024-11-26T10:29:14,531 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-26T10:29:14,533 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2c0c30f2-2a7b-4d4e-af8c-eeb6d46aee32' 2024-11-26T10:29:14,533 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-26T10:29:14,533 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2c0c30f2-2a7b-4d4e-af8c-eeb6d46aee32" 2024-11-26T10:29:14,534 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@227c1be3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:29:14,534 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [94eedbb855cf,39559,-1] 2024-11-26T10:29:14,534 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-26T10:29:14,534 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:29:14,535 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50324, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-26T10:29:14,536 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c7248d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:29:14,537 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-26T10:29:14,538 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=94eedbb855cf,46275,1732616953615, seqNum=-1] 2024-11-26T10:29:14,539 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:29:14,539 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51020, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:29:14,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=94eedbb855cf,39559,1732616953556 2024-11-26T10:29:14,541 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:29:14,543 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-26T10:29:14,544 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-26T10:29:14,545 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 94eedbb855cf,39559,1732616953556 2024-11-26T10:29:14,545 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@522f9e2e 2024-11-26T10:29:14,545 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-26T10:29:14,546 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50328, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-26T10:29:14,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-26T10:29:14,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-26T10:29:14,547 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:29:14,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-26T10:29:14,549 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-26T10:29:14,549 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:29:14,549 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-26T10:29:14,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-26T10:29:14,550 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-26T10:29:14,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741835_1011 (size=405) 2024-11-26T10:29:14,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741835_1011 (size=405) 2024-11-26T10:29:14,559 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 45788ac66acda36a3e2ca07089f47d9e, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d 2024-11-26T10:29:14,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741836_1012 (size=88) 2024-11-26T10:29:14,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741836_1012 (size=88) 2024-11-26T10:29:14,567 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:29:14,567 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 45788ac66acda36a3e2ca07089f47d9e, disabling compactions & flushes 2024-11-26T10:29:14,567 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e. 2024-11-26T10:29:14,567 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e. 2024-11-26T10:29:14,567 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e. after waiting 0 ms 2024-11-26T10:29:14,567 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e. 
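[Annotation] The TableDescriptorChecker warnings and the create request above show a deliberately tiny max file size (786432 bytes) and memstore flush size (8192 bytes), which the test uses to force frequent flushes and splits, plus an 'info' family with a single version and a ROW bloom filter. A hedged sketch of issuing an equivalent create through the standard client API follows; it is an illustration under those logged values, not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
              // Values that triggered the "too small" warnings above.
              .setMaxFileSize(786432L)      // MAX_FILESIZE / hbase.hregion.max.filesize
              .setMemStoreFlushSize(8192L)  // MEMSTORE_FLUSHSIZE / hbase.hregion.memstore.flush.size
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)                 // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                  .build())
              .build();
          admin.createTable(td);
        }
      }
    }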
2024-11-26T10:29:14,567 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e. 2024-11-26T10:29:14,567 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 45788ac66acda36a3e2ca07089f47d9e: Waiting for close lock at 1732616954567Disabling compacts and flushes for region at 1732616954567Disabling writes for close at 1732616954567Writing region close event to WAL at 1732616954567Closed at 1732616954567 2024-11-26T10:29:14,569 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-26T10:29:14,569 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732616954569"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732616954569"}]},"ts":"1732616954569"} 2024-11-26T10:29:14,572 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-26T10:29:14,573 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-26T10:29:14,573 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732616954573"}]},"ts":"1732616954573"} 2024-11-26T10:29:14,575 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-26T10:29:14,576 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=45788ac66acda36a3e2ca07089f47d9e, ASSIGN}] 2024-11-26T10:29:14,577 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=45788ac66acda36a3e2ca07089f47d9e, ASSIGN 2024-11-26T10:29:14,578 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=45788ac66acda36a3e2ca07089f47d9e, ASSIGN; state=OFFLINE, location=94eedbb855cf,46275,1732616953615; forceNewPlan=false, retain=false 2024-11-26T10:29:14,729 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=45788ac66acda36a3e2ca07089f47d9e, regionState=OPENING, regionLocation=94eedbb855cf,46275,1732616953615 2024-11-26T10:29:14,732 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=45788ac66acda36a3e2ca07089f47d9e, ASSIGN because future has completed 2024-11-26T10:29:14,732 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 45788ac66acda36a3e2ca07089f47d9e, server=94eedbb855cf,46275,1732616953615}] 2024-11-26T10:29:14,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:14,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:14,889 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e. 
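The recurring RecoverLeaseFSUtils warning above comes from a Close-WAL-Writer thread that keeps retrying lease recovery for WAL files on hdfs://localhost:43805, while the DFSClient for that filesystem has already been closed (the test currently running uses hdfs://localhost:36867), so each reflective isFileClosed call fails with "Filesystem closed". Below is a simplified, hypothetical sketch of the recover-then-poll pattern involved; the real utility uses reflection, timeouts, and backoff, so this is only an approximation of the behavior, not the project's implementation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Simplified recover-then-poll loop. If the underlying DFSClient has already been
// closed, isFileClosed() throws IOException("Filesystem closed"), which is the
// root cause wrapped in the InvocationTargetException logged above.
public class WalLeaseRecoverySketch {
  static boolean recoverLease(FileSystem fs, Path wal) throws Exception {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to recover on a local filesystem
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    if (dfs.recoverLease(wal)) {
      return true; // lease recovered immediately
    }
    for (int attempt = 0; attempt < 10; attempt++) {
      if (dfs.isFileClosed(wal)) {
        return true; // NameNode reports the file as closed
      }
      Thread.sleep(1000L); // retry roughly once per second, as the timestamps show
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Path wal = new Path(args[0]); // an hdfs://.../WALs/... path
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(wal.toUri(), conf)) {
      System.out.println("recovered=" + recoverLease(fs, wal));
    }
  }
}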
2024-11-26T10:29:14,889 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 45788ac66acda36a3e2ca07089f47d9e, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:29:14,889 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 45788ac66acda36a3e2ca07089f47d9e 2024-11-26T10:29:14,889 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:29:14,889 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 45788ac66acda36a3e2ca07089f47d9e 2024-11-26T10:29:14,889 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 45788ac66acda36a3e2ca07089f47d9e 2024-11-26T10:29:14,891 INFO [StoreOpener-45788ac66acda36a3e2ca07089f47d9e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 45788ac66acda36a3e2ca07089f47d9e 2024-11-26T10:29:14,892 INFO [StoreOpener-45788ac66acda36a3e2ca07089f47d9e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 45788ac66acda36a3e2ca07089f47d9e columnFamilyName info 2024-11-26T10:29:14,892 DEBUG [StoreOpener-45788ac66acda36a3e2ca07089f47d9e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:29:14,893 INFO [StoreOpener-45788ac66acda36a3e2ca07089f47d9e-1 {}] regionserver.HStore(327): Store=45788ac66acda36a3e2ca07089f47d9e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:29:14,893 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 45788ac66acda36a3e2ca07089f47d9e 2024-11-26T10:29:14,893 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e 2024-11-26T10:29:14,894 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e 2024-11-26T10:29:14,894 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 45788ac66acda36a3e2ca07089f47d9e 2024-11-26T10:29:14,894 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 45788ac66acda36a3e2ca07089f47d9e 2024-11-26T10:29:14,896 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 45788ac66acda36a3e2ca07089f47d9e 2024-11-26T10:29:14,897 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:29:14,898 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 45788ac66acda36a3e2ca07089f47d9e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=756850, jitterRate=-0.037616655230522156}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-26T10:29:14,898 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 45788ac66acda36a3e2ca07089f47d9e 2024-11-26T10:29:14,898 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 45788ac66acda36a3e2ca07089f47d9e: Running coprocessor pre-open hook at 1732616954890Writing region info on filesystem at 1732616954890Initializing all the Stores at 1732616954890Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732616954890Cleaning up temporary data from old regions at 1732616954894 (+4 ms)Running coprocessor post-open hooks at 1732616954898 (+4 ms)Region opened successfully at 1732616954898 2024-11-26T10:29:14,899 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e., pid=6, masterSystemTime=1732616954885 2024-11-26T10:29:14,902 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e. 2024-11-26T10:29:14,902 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e. 2024-11-26T10:29:14,903 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=45788ac66acda36a3e2ca07089f47d9e, regionState=OPEN, openSeqNum=2, regionLocation=94eedbb855cf,46275,1732616953615 2024-11-26T10:29:14,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 45788ac66acda36a3e2ca07089f47d9e, server=94eedbb855cf,46275,1732616953615 because future has completed 2024-11-26T10:29:14,909 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-26T10:29:14,909 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 45788ac66acda36a3e2ca07089f47d9e, server=94eedbb855cf,46275,1732616953615 in 175 msec 2024-11-26T10:29:14,912 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-26T10:29:14,912 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=45788ac66acda36a3e2ca07089f47d9e, ASSIGN in 333 msec 2024-11-26T10:29:14,913 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-26T10:29:14,913 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732616954913"}]},"ts":"1732616954913"} 2024-11-26T10:29:14,915 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-26T10:29:14,916 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-26T10:29:14,918 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 369 msec 2024-11-26T10:29:15,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:15,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:16,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:16,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:17,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:17,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:18,756 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-26T10:29:18,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:18,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:18,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:18,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:18,758 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:18,758 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:18,772 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:18,772 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:18,773 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:18,773 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:18,773 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:18,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:18,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:18,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:18,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:18,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:29:18,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:18,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:19,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:19,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:19,866 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-26T10:29:19,867 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-26T10:29:20,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:20,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:21,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:21,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:22,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:22,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:23,254 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-26T10:29:23,254 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-26T10:29:23,254 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-26T10:29:23,254 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-26T10:29:23,254 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-26T10:29:23,254 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-26T10:29:23,255 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-26T10:29:23,255 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-26T10:29:23,850 WARN 
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:23,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:24,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-26T10:29:24,575 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-26T10:29:24,575 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-26T10:29:24,578 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-26T10:29:24,578 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e. 
2024-11-26T10:29:24,581 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e., hostname=94eedbb855cf,46275,1732616953615, seqNum=2] 2024-11-26T10:29:24,590 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-26T10:29:24,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-26T10:29:24,596 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:29:24,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-26T10:29:24,598 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:29:24,599 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:29:24,761 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46275 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-26T10:29:24,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e. 
2024-11-26T10:29:24,762 INFO [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 45788ac66acda36a3e2ca07089f47d9e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-26T10:29:24,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/.tmp/info/e6f28e40535a4cf89ef3e2a4e24bb874 is 1080, key is row0001/info:/1732616964583/Put/seqid=0 2024-11-26T10:29:24,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741837_1013 (size=6033) 2024-11-26T10:29:24,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741837_1013 (size=6033) 2024-11-26T10:29:24,788 INFO [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/.tmp/info/e6f28e40535a4cf89ef3e2a4e24bb874 2024-11-26T10:29:24,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/.tmp/info/e6f28e40535a4cf89ef3e2a4e24bb874 as hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/e6f28e40535a4cf89ef3e2a4e24bb874 2024-11-26T10:29:24,799 INFO [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/e6f28e40535a4cf89ef3e2a4e24bb874, entries=1, sequenceid=5, filesize=5.9 K 2024-11-26T10:29:24,800 INFO [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 45788ac66acda36a3e2ca07089f47d9e in 38ms, sequenceid=5, compaction requested=false 2024-11-26T10:29:24,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 45788ac66acda36a3e2ca07089f47d9e: 2024-11-26T10:29:24,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e. 
2024-11-26T10:29:24,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-26T10:29:24,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-26T10:29:24,809 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-26T10:29:24,809 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 207 msec 2024-11-26T10:29:24,812 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 218 msec 2024-11-26T10:29:24,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:24,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:25,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:25,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:26,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:26,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:27,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:27,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:28,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:28,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:29,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:29,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:30,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:30,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:31,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:31,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:32,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:32,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:33,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:33,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-26T10:29:34,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-26T10:29:34,654 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-26T10:29:34,658 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-26T10:29:34,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-26T10:29:34,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-26T10:29:34,660 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-26T10:29:34,661 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-26T10:29:34,661 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-26T10:29:34,814 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46275 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-26T10:29:34,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.
2024-11-26T10:29:34,815 INFO [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 45788ac66acda36a3e2ca07089f47d9e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-26T10:29:34,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/.tmp/info/c9ae5c3e1f5d4dc08f7ce07336e27dfb is 1080, key is row0002/info:/1732616974655/Put/seqid=0 2024-11-26T10:29:34,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741838_1014 (size=6033) 2024-11-26T10:29:34,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741838_1014 (size=6033) 2024-11-26T10:29:34,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:34,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-26T10:29:35,227 INFO [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/.tmp/info/c9ae5c3e1f5d4dc08f7ce07336e27dfb
2024-11-26T10:29:35,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/.tmp/info/c9ae5c3e1f5d4dc08f7ce07336e27dfb as hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/c9ae5c3e1f5d4dc08f7ce07336e27dfb
2024-11-26T10:29:35,239 INFO [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/c9ae5c3e1f5d4dc08f7ce07336e27dfb, entries=1, sequenceid=9, filesize=5.9 K
2024-11-26T10:29:35,241 INFO [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 45788ac66acda36a3e2ca07089f47d9e in 426ms, sequenceid=9, compaction requested=false
2024-11-26T10:29:35,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 45788ac66acda36a3e2ca07089f47d9e:
2024-11-26T10:29:35,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.
2024-11-26T10:29:35,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-26T10:29:35,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-26T10:29:35,245 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-26T10:29:35,245 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 582 msec 2024-11-26T10:29:35,248 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 588 msec 2024-11-26T10:29:35,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:35,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:36,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:36,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:37,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:37,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:38,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:38,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:38,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 after 68048ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:29:38,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta after 68034ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:29:39,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:39,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:40,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:40,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:41,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:41,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:42,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:42,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:43,536 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-26T10:29:43,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:43,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-11-26T10:29:44,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-26T10:29:44,694 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-26T10:29:44,700 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C46275%2C1732616953615.1732616984700
2024-11-26T10:29:44,705 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-26T10:29:44,705 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-26T10:29:44,706 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-26T10:29:44,706 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-26T10:29:44,706 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-26T10:29:44,706 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/WALs/94eedbb855cf,46275,1732616953615/94eedbb855cf%2C46275%2C1732616953615.1732616954027 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/WALs/94eedbb855cf,46275,1732616953615/94eedbb855cf%2C46275%2C1732616953615.1732616984700
2024-11-26T10:29:44,707 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44667:44667),(127.0.0.1/127.0.0.1:44549:44549)]
2024-11-26T10:29:44,707 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/WALs/94eedbb855cf,46275,1732616953615/94eedbb855cf%2C46275%2C1732616953615.1732616954027 is not closed yet, will try archiving it next time
2024-11-26T10:29:44,708 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-26T10:29:44,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741833_1009 (size=5546)
2024-11-26T10:29:44,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741833_1009 (size=5546)
2024-11-26T10:29:44,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-26T10:29:44,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-26T10:29:44,710 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-26T10:29:44,712 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-26T10:29:44,712 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-26T10:29:44,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46275 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-26T10:29:44,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:44,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:44,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.
2024-11-26T10:29:44,864 INFO [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 45788ac66acda36a3e2ca07089f47d9e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-26T10:29:44,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/.tmp/info/cdfb3ed55ef24e47b48b0c722cdbe305 is 1080, key is row0003/info:/1732616984696/Put/seqid=0
2024-11-26T10:29:44,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741840_1016 (size=6033)
2024-11-26T10:29:44,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741840_1016 (size=6033)
2024-11-26T10:29:44,876 INFO [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/.tmp/info/cdfb3ed55ef24e47b48b0c722cdbe305
2024-11-26T10:29:44,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/.tmp/info/cdfb3ed55ef24e47b48b0c722cdbe305 as hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/cdfb3ed55ef24e47b48b0c722cdbe305
2024-11-26T10:29:44,888 INFO [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/cdfb3ed55ef24e47b48b0c722cdbe305, entries=1, sequenceid=13, filesize=5.9 K
2024-11-26T10:29:44,889 INFO [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 45788ac66acda36a3e2ca07089f47d9e in 25ms, sequenceid=13, compaction requested=true
2024-11-26T10:29:44,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 45788ac66acda36a3e2ca07089f47d9e:
2024-11-26T10:29:44,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.
2024-11-26T10:29:44,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-26T10:29:44,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-26T10:29:44,893 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-26T10:29:44,893 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec
2024-11-26T10:29:44,896 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 186 msec
2024-11-26T10:29:45,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:45,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:46,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:46,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:47,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:47,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:48,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:48,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:49,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:49,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:50,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:50,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:51,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:51,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:52,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:52,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:53,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:53,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null
2024-11-26T10:29:54,442 INFO [master/94eedbb855cf:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-26T10:29:54,442 INFO [master/94eedbb855cf:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-26T10:29:54,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-26T10:29:54,804 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-26T10:29:54,804 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-26T10:29:54,805 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-26T10:29:54,806 DEBUG [Time-limited test {}] regionserver.HStore(1541): 45788ac66acda36a3e2ca07089f47d9e/info is initiating minor compaction (all files)
2024-11-26T10:29:54,806 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-26T10:29:54,806 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-26T10:29:54,806 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 45788ac66acda36a3e2ca07089f47d9e/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.
2024-11-26T10:29:54,806 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/e6f28e40535a4cf89ef3e2a4e24bb874, hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/c9ae5c3e1f5d4dc08f7ce07336e27dfb, hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/cdfb3ed55ef24e47b48b0c722cdbe305] into tmpdir=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/.tmp, totalSize=17.7 K
2024-11-26T10:29:54,807 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting e6f28e40535a4cf89ef3e2a4e24bb874, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732616964583
2024-11-26T10:29:54,807 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting c9ae5c3e1f5d4dc08f7ce07336e27dfb, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732616974655
2024-11-26T10:29:54,807 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting cdfb3ed55ef24e47b48b0c722cdbe305, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732616984696
2024-11-26T10:29:54,818 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 45788ac66acda36a3e2ca07089f47d9e#info#compaction#45 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-26T10:29:54,818 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/.tmp/info/d4152a670dd143f6b391c2e7c8486dc2 is 1080, key is row0001/info:/1732616964583/Put/seqid=0
2024-11-26T10:29:54,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741841_1017 (size=8296)
2024-11-26T10:29:54,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741841_1017 (size=8296)
2024-11-26T10:29:54,829 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/.tmp/info/d4152a670dd143f6b391c2e7c8486dc2 as hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/d4152a670dd143f6b391c2e7c8486dc2
2024-11-26T10:29:54,835 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 45788ac66acda36a3e2ca07089f47d9e/info of 45788ac66acda36a3e2ca07089f47d9e into d4152a670dd143f6b391c2e7c8486dc2(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-26T10:29:54,835 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 45788ac66acda36a3e2ca07089f47d9e:
2024-11-26T10:29:54,837 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C46275%2C1732616953615.1732616994837
2024-11-26T10:29:54,843 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-26T10:29:54,843 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-26T10:29:54,843 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-26T10:29:54,843 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-26T10:29:54,843 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-26T10:29:54,843 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/WALs/94eedbb855cf,46275,1732616953615/94eedbb855cf%2C46275%2C1732616953615.1732616984700 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/WALs/94eedbb855cf,46275,1732616953615/94eedbb855cf%2C46275%2C1732616953615.1732616994837
2024-11-26T10:29:54,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741839_1015 (size=2520)
2024-11-26T10:29:54,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741839_1015 (size=2520)
2024-11-26T10:29:54,848 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44667:44667),(127.0.0.1/127.0.0.1:44549:44549)]
2024-11-26T10:29:54,849 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/WALs/94eedbb855cf,46275,1732616953615/94eedbb855cf%2C46275%2C1732616953615.1732616954027 to hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/oldWALs/94eedbb855cf%2C46275%2C1732616953615.1732616954027
hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/WALs/94eedbb855cf,46275,1732616953615/94eedbb855cf%2C46275%2C1732616953615.1732616954027 to hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/oldWALs/94eedbb855cf%2C46275%2C1732616953615.1732616954027
2024-11-26T10:29:54,849 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-26T10:29:54,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-26T10:29:54,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-26T10:29:54,852 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-26T10:29:54,853 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-26T10:29:54,853 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-26T10:29:54,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-26T10:29:54,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-26T10:29:55,006 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46275 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-26T10:29:55,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.
2024-11-26T10:29:55,006 INFO [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 45788ac66acda36a3e2ca07089f47d9e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-26T10:29:55,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/.tmp/info/c35c44210fb74637bcbbea1326d37640 is 1080, key is row0000/info:/1732616994836/Put/seqid=0
2024-11-26T10:29:55,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741843_1019 (size=6033)
2024-11-26T10:29:55,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741843_1019 (size=6033)
2024-11-26T10:29:55,016 INFO [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/.tmp/info/c35c44210fb74637bcbbea1326d37640
2024-11-26T10:29:55,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/.tmp/info/c35c44210fb74637bcbbea1326d37640 as hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/c35c44210fb74637bcbbea1326d37640
2024-11-26T10:29:55,027 INFO [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/c35c44210fb74637bcbbea1326d37640, entries=1, sequenceid=18, filesize=5.9 K
2024-11-26T10:29:55,029 INFO [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 45788ac66acda36a3e2ca07089f47d9e in 22ms, sequenceid=18, compaction requested=false
2024-11-26T10:29:55,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 45788ac66acda36a3e2ca07089f47d9e:
2024-11-26T10:29:55,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.
2024-11-26T10:29:55,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-11-26T10:29:55,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-11-26T10:29:55,033 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-11-26T10:29:55,033 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec
2024-11-26T10:29:55,035 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec
2024-11-26T10:29:55,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-26T10:29:55,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:56,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:56,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:57,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:57,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:58,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:58,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:59,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:29:59,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:29:59,889 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 45788ac66acda36a3e2ca07089f47d9e, had cached 0 bytes from a total of 14329 2024-11-26T10:30:00,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:00,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:01,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:01,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:02,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:02,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:03,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:03,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:04,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:04,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:04,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39559 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-26T10:30:04,955 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-26T10:30:04,958 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C46275%2C1732616953615.1732617004958 2024-11-26T10:30:04,966 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:04,966 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:04,966 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:04,966 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:04,966 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:04,966 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/WALs/94eedbb855cf,46275,1732616953615/94eedbb855cf%2C46275%2C1732616953615.1732616994837 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/WALs/94eedbb855cf,46275,1732616953615/94eedbb855cf%2C46275%2C1732616953615.1732617004958 2024-11-26T10:30:04,967 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44549:44549),(127.0.0.1/127.0.0.1:44667:44667)] 2024-11-26T10:30:04,967 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/WALs/94eedbb855cf,46275,1732616953615/94eedbb855cf%2C46275%2C1732616953615.1732616994837 is not closed yet, will try archiving it next time 2024-11-26T10:30:04,967 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/WALs/94eedbb855cf,46275,1732616953615/94eedbb855cf%2C46275%2C1732616953615.1732616984700 to hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/oldWALs/94eedbb855cf%2C46275%2C1732616953615.1732616984700 2024-11-26T10:30:04,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-26T10:30:04,967 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-26T10:30:04,967 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:30:04,967 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:30:04,968 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:30:04,968 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-26T10:30:04,968 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1156108172, stopped=false 2024-11-26T10:30:04,968 
INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=94eedbb855cf,39559,1732616953556 2024-11-26T10:30:04,968 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-26T10:30:04,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741842_1018 (size=2026) 2024-11-26T10:30:04,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741842_1018 (size=2026) 2024-11-26T10:30:04,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-26T10:30:04,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-26T10:30:04,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:30:04,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:30:04,970 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-26T10:30:04,970 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-26T10:30:04,970 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:30:04,970 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:30:04,971 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '94eedbb855cf,46275,1732616953615' ***** 2024-11-26T10:30:04,971 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-26T10:30:04,971 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:30:04,971 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:30:04,971 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-26T10:30:04,971 INFO [RS:0;94eedbb855cf:46275 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-26T10:30:04,971 INFO [RS:0;94eedbb855cf:46275 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-26T10:30:04,971 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.HRegionServer(3091): Received CLOSE for 45788ac66acda36a3e2ca07089f47d9e 2024-11-26T10:30:04,971 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-26T10:30:04,974 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.HRegionServer(959): stopping server 94eedbb855cf,46275,1732616953615 2024-11-26T10:30:04,974 INFO [RS:0;94eedbb855cf:46275 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-26T10:30:04,974 INFO [RS:0;94eedbb855cf:46275 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;94eedbb855cf:46275. 2024-11-26T10:30:04,974 DEBUG [RS:0;94eedbb855cf:46275 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:30:04,974 DEBUG [RS:0;94eedbb855cf:46275 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:30:04,974 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 45788ac66acda36a3e2ca07089f47d9e, disabling compactions & flushes 2024-11-26T10:30:04,974 INFO 
[RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e. 2024-11-26T10:30:04,974 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-26T10:30:04,974 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-26T10:30:04,974 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e. 2024-11-26T10:30:04,974 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-26T10:30:04,974 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e. after waiting 0 ms 2024-11-26T10:30:04,974 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e. 2024-11-26T10:30:04,974 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-26T10:30:04,974 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 45788ac66acda36a3e2ca07089f47d9e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-26T10:30:04,974 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-26T10:30:04,974 DEBUG [RS:0;94eedbb855cf:46275 {}] regionserver.HRegionServer(1325): Online Regions={45788ac66acda36a3e2ca07089f47d9e=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e., 1588230740=hbase:meta,,1.1588230740} 2024-11-26T10:30:04,974 DEBUG [RS:0;94eedbb855cf:46275 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 45788ac66acda36a3e2ca07089f47d9e 2024-11-26T10:30:04,974 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-26T10:30:04,975 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-26T10:30:04,975 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-26T10:30:04,975 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-26T10:30:04,975 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-26T10:30:04,975 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-26T10:30:04,979 DEBUG 
[RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/.tmp/info/5d3eeef047aa49f99635499860d20fb6 is 1080, key is row0001/info:/1732617004956/Put/seqid=0 2024-11-26T10:30:04,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741845_1021 (size=6033) 2024-11-26T10:30:04,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741845_1021 (size=6033) 2024-11-26T10:30:04,990 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/.tmp/info/5d3eeef047aa49f99635499860d20fb6 2024-11-26T10:30:04,992 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740/.tmp/info/9c88b3b727f64bd79779e672992a86dc is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e./info:regioninfo/1732616954903/Put/seqid=0 2024-11-26T10:30:04,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741846_1022 (size=7308) 2024-11-26T10:30:05,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741846_1022 (size=7308) 2024-11-26T10:30:05,000 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740/.tmp/info/9c88b3b727f64bd79779e672992a86dc 2024-11-26T10:30:05,001 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/.tmp/info/5d3eeef047aa49f99635499860d20fb6 as hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/5d3eeef047aa49f99635499860d20fb6 2024-11-26T10:30:05,006 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/5d3eeef047aa49f99635499860d20fb6, entries=1, sequenceid=22, filesize=5.9 K 2024-11-26T10:30:05,008 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 45788ac66acda36a3e2ca07089f47d9e in 33ms, sequenceid=22, compaction requested=true 2024-11-26T10:30:05,011 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/e6f28e40535a4cf89ef3e2a4e24bb874, hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/c9ae5c3e1f5d4dc08f7ce07336e27dfb, hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/cdfb3ed55ef24e47b48b0c722cdbe305] to archive 2024-11-26T10:30:05,012 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-26T10:30:05,014 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/e6f28e40535a4cf89ef3e2a4e24bb874 to hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/e6f28e40535a4cf89ef3e2a4e24bb874 2024-11-26T10:30:05,015 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/c9ae5c3e1f5d4dc08f7ce07336e27dfb to hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/c9ae5c3e1f5d4dc08f7ce07336e27dfb 2024-11-26T10:30:05,017 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/cdfb3ed55ef24e47b48b0c722cdbe305 to hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/info/cdfb3ed55ef24e47b48b0c722cdbe305 2024-11-26T10:30:05,017 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=94eedbb855cf:39559 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-26T10:30:05,017 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [e6f28e40535a4cf89ef3e2a4e24bb874=6033, c9ae5c3e1f5d4dc08f7ce07336e27dfb=6033, cdfb3ed55ef24e47b48b0c722cdbe305=6033] 2024-11-26T10:30:05,022 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45788ac66acda36a3e2ca07089f47d9e/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-26T10:30:05,022 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e. 2024-11-26T10:30:05,023 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 45788ac66acda36a3e2ca07089f47d9e: Waiting for close lock at 1732617004974Running coprocessor pre-close hooks at 1732617004974Disabling compacts and flushes for region at 1732617004974Disabling writes for close at 1732617004974Obtaining lock to block concurrent updates at 1732617004974Preparing flush snapshotting stores in 45788ac66acda36a3e2ca07089f47d9e at 1732617004974Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732617004975 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e. at 1732617004975Flushing 45788ac66acda36a3e2ca07089f47d9e/info: creating writer at 1732617004975Flushing 45788ac66acda36a3e2ca07089f47d9e/info: appending metadata at 1732617004978 (+3 ms)Flushing 45788ac66acda36a3e2ca07089f47d9e/info: closing flushed file at 1732617004978Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4cac8c8: reopening flushed file at 1732617005000 (+22 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 45788ac66acda36a3e2ca07089f47d9e in 33ms, sequenceid=22, compaction requested=true at 1732617005008 (+8 ms)Writing region close event to WAL at 1732617005018 (+10 ms)Running coprocessor post-close hooks at 1732617005022 (+4 ms)Closed at 1732617005022 2024-11-26T10:30:05,023 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732616954546.45788ac66acda36a3e2ca07089f47d9e. 
2024-11-26T10:30:05,025 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740/.tmp/ns/6d8f4182d09240aeb78a21cb0efaf0fb is 43, key is default/ns:d/1732616954428/Put/seqid=0 2024-11-26T10:30:05,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741847_1023 (size=5153) 2024-11-26T10:30:05,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741847_1023 (size=5153) 2024-11-26T10:30:05,031 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740/.tmp/ns/6d8f4182d09240aeb78a21cb0efaf0fb 2024-11-26T10:30:05,051 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740/.tmp/table/862a61b5f8154585b9761181c4484e81 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732616954913/Put/seqid=0 2024-11-26T10:30:05,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741848_1024 (size=5508) 2024-11-26T10:30:05,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741848_1024 (size=5508) 2024-11-26T10:30:05,056 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740/.tmp/table/862a61b5f8154585b9761181c4484e81 2024-11-26T10:30:05,062 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740/.tmp/info/9c88b3b727f64bd79779e672992a86dc as hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740/info/9c88b3b727f64bd79779e672992a86dc 2024-11-26T10:30:05,067 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740/info/9c88b3b727f64bd79779e672992a86dc, entries=10, sequenceid=11, filesize=7.1 K 2024-11-26T10:30:05,068 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740/.tmp/ns/6d8f4182d09240aeb78a21cb0efaf0fb as hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740/ns/6d8f4182d09240aeb78a21cb0efaf0fb 2024-11-26T10:30:05,072 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740/ns/6d8f4182d09240aeb78a21cb0efaf0fb, entries=2, sequenceid=11, filesize=5.0 K 2024-11-26T10:30:05,073 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740/.tmp/table/862a61b5f8154585b9761181c4484e81 as hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740/table/862a61b5f8154585b9761181c4484e81 2024-11-26T10:30:05,078 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740/table/862a61b5f8154585b9761181c4484e81, entries=2, sequenceid=11, filesize=5.4 K 2024-11-26T10:30:05,079 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 104ms, sequenceid=11, compaction requested=false 2024-11-26T10:30:05,084 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-26T10:30:05,084 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-26T10:30:05,084 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-26T10:30:05,084 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732617004974Running coprocessor pre-close hooks at 1732617004974Disabling compacts and flushes for region at 1732617004974Disabling writes for close at 1732617004975 (+1 ms)Obtaining lock to block concurrent updates at 1732617004975Preparing flush snapshotting stores in 1588230740 at 1732617004975Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732617004975Flushing stores of hbase:meta,,1.1588230740 at 1732617004976 (+1 ms)Flushing 1588230740/info: creating writer at 1732617004976Flushing 1588230740/info: appending metadata at 1732617004991 (+15 ms)Flushing 1588230740/info: closing flushed file at 1732617004991Flushing 1588230740/ns: creating writer at 1732617005006 (+15 ms)Flushing 1588230740/ns: appending metadata at 1732617005025 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1732617005025Flushing 1588230740/table: creating writer at 1732617005036 (+11 ms)Flushing 1588230740/table: appending metadata at 1732617005050 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732617005050Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@369f697e: reopening flushed file at 1732617005061 (+11 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4576fe02: reopening flushed file at 1732617005067 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@464947df: reopening flushed file at 1732617005073 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 104ms, sequenceid=11, compaction requested=false at 1732617005079 (+6 ms)Writing region close event to WAL at 1732617005081 (+2 ms)Running coprocessor post-close hooks at 1732617005084 (+3 ms)Closed at 1732617005084 2024-11-26T10:30:05,085 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-26T10:30:05,175 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.HRegionServer(976): stopping server 94eedbb855cf,46275,1732616953615; all regions closed. 2024-11-26T10:30:05,175 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:05,175 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:05,175 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:05,176 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:05,176 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:05,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741834_1010 (size=3306) 2024-11-26T10:30:05,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741834_1010 (size=3306) 2024-11-26T10:30:05,180 DEBUG [RS:0;94eedbb855cf:46275 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/oldWALs 2024-11-26T10:30:05,180 INFO [RS:0;94eedbb855cf:46275 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 94eedbb855cf%2C46275%2C1732616953615.meta:.meta(num 1732616954383) 2024-11-26T10:30:05,181 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:05,181 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:05,181 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:05,181 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:05,181 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:05,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741844_1020 (size=1252) 2024-11-26T10:30:05,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741844_1020 (size=1252) 2024-11-26T10:30:05,187 DEBUG [RS:0;94eedbb855cf:46275 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/oldWALs 2024-11-26T10:30:05,187 INFO [RS:0;94eedbb855cf:46275 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 94eedbb855cf%2C46275%2C1732616953615:(num 1732617004958) 2024-11-26T10:30:05,187 DEBUG [RS:0;94eedbb855cf:46275 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:30:05,187 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.LeaseManager(133): Closed leases 2024-11-26T10:30:05,187 INFO [RS:0;94eedbb855cf:46275 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-26T10:30:05,187 INFO [RS:0;94eedbb855cf:46275 {}] hbase.ChoreService(370): Chore service for: regionserver/94eedbb855cf:0 had 
[ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-26T10:30:05,188 INFO [RS:0;94eedbb855cf:46275 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-26T10:30:05,188 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-26T10:30:05,188 INFO [RS:0;94eedbb855cf:46275 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46275 2024-11-26T10:30:05,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/94eedbb855cf,46275,1732616953615 2024-11-26T10:30:05,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-26T10:30:05,192 INFO [RS:0;94eedbb855cf:46275 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-26T10:30:05,193 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [94eedbb855cf,46275,1732616953615] 2024-11-26T10:30:05,194 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/94eedbb855cf,46275,1732616953615 already deleted, retry=false 2024-11-26T10:30:05,195 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 94eedbb855cf,46275,1732616953615 expired; onlineServers=0 2024-11-26T10:30:05,195 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '94eedbb855cf,39559,1732616953556' ***** 2024-11-26T10:30:05,195 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-26T10:30:05,195 INFO [M:0;94eedbb855cf:39559 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-26T10:30:05,195 INFO [M:0;94eedbb855cf:39559 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-26T10:30:05,195 DEBUG [M:0;94eedbb855cf:39559 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-26T10:30:05,195 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-26T10:30:05,195 DEBUG [M:0;94eedbb855cf:39559 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-26T10:30:05,195 DEBUG [master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732616953778 {}] cleaner.HFileCleaner(306): Exit Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732616953778,5,FailOnTimeoutGroup] 2024-11-26T10:30:05,195 DEBUG [master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732616953778 {}] cleaner.HFileCleaner(306): Exit Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732616953778,5,FailOnTimeoutGroup] 2024-11-26T10:30:05,195 INFO [M:0;94eedbb855cf:39559 {}] hbase.ChoreService(370): Chore service for: master/94eedbb855cf:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-26T10:30:05,195 INFO [M:0;94eedbb855cf:39559 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-26T10:30:05,195 DEBUG [M:0;94eedbb855cf:39559 {}] master.HMaster(1795): Stopping service threads 2024-11-26T10:30:05,195 INFO [M:0;94eedbb855cf:39559 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-26T10:30:05,195 INFO [M:0;94eedbb855cf:39559 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-26T10:30:05,196 INFO [M:0;94eedbb855cf:39559 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-26T10:30:05,196 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-26T10:30:05,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-26T10:30:05,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:30:05,196 DEBUG [M:0;94eedbb855cf:39559 {}] zookeeper.ZKUtil(347): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-26T10:30:05,196 WARN [M:0;94eedbb855cf:39559 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-26T10:30:05,197 INFO [M:0;94eedbb855cf:39559 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/.lastflushedseqids 2024-11-26T10:30:05,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741849_1025 (size=130) 2024-11-26T10:30:05,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741849_1025 (size=130) 2024-11-26T10:30:05,202 INFO [M:0;94eedbb855cf:39559 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-26T10:30:05,203 INFO [M:0;94eedbb855cf:39559 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-26T10:30:05,203 DEBUG [M:0;94eedbb855cf:39559 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-26T10:30:05,203 INFO [M:0;94eedbb855cf:39559 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:30:05,203 DEBUG [M:0;94eedbb855cf:39559 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:30:05,203 DEBUG [M:0;94eedbb855cf:39559 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-26T10:30:05,203 DEBUG [M:0;94eedbb855cf:39559 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:30:05,203 INFO [M:0;94eedbb855cf:39559 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.54 KB heapSize=54.91 KB 2024-11-26T10:30:05,219 DEBUG [M:0;94eedbb855cf:39559 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7555c5bc9e6541b3b1225aaa243168cf is 82, key is hbase:meta,,1/info:regioninfo/1732616954410/Put/seqid=0 2024-11-26T10:30:05,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741850_1026 (size=5672) 2024-11-26T10:30:05,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741850_1026 (size=5672) 2024-11-26T10:30:05,225 INFO [M:0;94eedbb855cf:39559 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7555c5bc9e6541b3b1225aaa243168cf 2024-11-26T10:30:05,244 DEBUG [M:0;94eedbb855cf:39559 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/10f5087a8def4feca52dcae44ef38ddb is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732616954917/Put/seqid=0 2024-11-26T10:30:05,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741851_1027 (size=7818) 2024-11-26T10:30:05,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741851_1027 (size=7818) 2024-11-26T10:30:05,250 INFO [M:0;94eedbb855cf:39559 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.94 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/10f5087a8def4feca52dcae44ef38ddb 2024-11-26T10:30:05,255 INFO [M:0;94eedbb855cf:39559 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 10f5087a8def4feca52dcae44ef38ddb 2024-11-26T10:30:05,269 DEBUG [M:0;94eedbb855cf:39559 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00672d165994498abcb206492173805a is 69, key is 94eedbb855cf,46275,1732616953615/rs:state/1732616953856/Put/seqid=0 2024-11-26T10:30:05,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741852_1028 (size=5156) 2024-11-26T10:30:05,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741852_1028 (size=5156) 2024-11-26T10:30:05,274 INFO [M:0;94eedbb855cf:39559 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00672d165994498abcb206492173805a 2024-11-26T10:30:05,292 DEBUG [M:0;94eedbb855cf:39559 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bdfe7a9ea48947a9901f40ee771a7434 is 52, key is load_balancer_on/state:d/1732616954542/Put/seqid=0 2024-11-26T10:30:05,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:30:05,294 INFO [RS:0;94eedbb855cf:46275 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-26T10:30:05,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46275-0x10153d289530001, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:30:05,294 INFO [RS:0;94eedbb855cf:46275 {}] regionserver.HRegionServer(1031): Exiting; stopping=94eedbb855cf,46275,1732616953615; zookeeper connection closed. 
2024-11-26T10:30:05,294 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1b2a83e4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1b2a83e4 2024-11-26T10:30:05,294 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-26T10:30:05,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741853_1029 (size=5056) 2024-11-26T10:30:05,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741853_1029 (size=5056) 2024-11-26T10:30:05,297 INFO [M:0;94eedbb855cf:39559 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bdfe7a9ea48947a9901f40ee771a7434 2024-11-26T10:30:05,303 DEBUG [M:0;94eedbb855cf:39559 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7555c5bc9e6541b3b1225aaa243168cf as hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7555c5bc9e6541b3b1225aaa243168cf 2024-11-26T10:30:05,307 INFO [M:0;94eedbb855cf:39559 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7555c5bc9e6541b3b1225aaa243168cf, entries=8, sequenceid=121, filesize=5.5 K 2024-11-26T10:30:05,308 DEBUG [M:0;94eedbb855cf:39559 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/10f5087a8def4feca52dcae44ef38ddb as hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/10f5087a8def4feca52dcae44ef38ddb 2024-11-26T10:30:05,313 INFO [M:0;94eedbb855cf:39559 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 10f5087a8def4feca52dcae44ef38ddb 2024-11-26T10:30:05,313 INFO [M:0;94eedbb855cf:39559 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/10f5087a8def4feca52dcae44ef38ddb, entries=14, sequenceid=121, filesize=7.6 K 2024-11-26T10:30:05,314 DEBUG [M:0;94eedbb855cf:39559 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00672d165994498abcb206492173805a as hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/00672d165994498abcb206492173805a 2024-11-26T10:30:05,320 INFO [M:0;94eedbb855cf:39559 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/00672d165994498abcb206492173805a, entries=1, sequenceid=121, filesize=5.0 K 2024-11-26T10:30:05,321 DEBUG [M:0;94eedbb855cf:39559 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bdfe7a9ea48947a9901f40ee771a7434 as hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bdfe7a9ea48947a9901f40ee771a7434 2024-11-26T10:30:05,326 INFO [M:0;94eedbb855cf:39559 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36867/user/jenkins/test-data/d2440638-f8b4-2162-d4f5-f523cd159c8d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bdfe7a9ea48947a9901f40ee771a7434, entries=1, sequenceid=121, filesize=4.9 K 2024-11-26T10:30:05,327 INFO [M:0;94eedbb855cf:39559 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=121, compaction requested=false 2024-11-26T10:30:05,331 INFO [M:0;94eedbb855cf:39559 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:30:05,331 DEBUG [M:0;94eedbb855cf:39559 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732617005203Disabling compacts and flushes for region at 1732617005203Disabling writes for close at 1732617005203Obtaining lock to block concurrent updates at 1732617005203Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732617005203Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44590, getHeapSize=56168, getOffHeapSize=0, getCellsCount=140 at 1732617005204 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732617005204Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732617005204Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732617005219 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732617005219Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732617005229 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732617005244 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732617005244Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732617005255 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732617005269 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732617005269Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732617005278 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732617005292 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732617005292Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7cc0af7d: reopening flushed file at 1732617005302 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36d37089: reopening flushed file at 1732617005308 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10b66439: reopening flushed file at 1732617005313 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@791a47fd: reopening flushed file at 1732617005320 (+7 ms)Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=121, compaction requested=false at 1732617005327 (+7 ms)Writing region close event to WAL at 1732617005331 (+4 ms)Closed at 1732617005331 2024-11-26T10:30:05,331 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:05,331 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:05,331 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:05,331 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:05,331 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:30:05,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33287 is added to blk_1073741830_1006 (size=52987) 2024-11-26T10:30:05,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40985 is added to blk_1073741830_1006 (size=52987) 2024-11-26T10:30:05,334 INFO [M:0;94eedbb855cf:39559 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-26T10:30:05,334 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-26T10:30:05,335 INFO [M:0;94eedbb855cf:39559 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39559 2024-11-26T10:30:05,335 INFO [M:0;94eedbb855cf:39559 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-26T10:30:05,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:30:05,438 INFO [M:0;94eedbb855cf:39559 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-26T10:30:05,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39559-0x10153d289530000, quorum=127.0.0.1:53861, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:30:05,440 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@72ef9fa2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:30:05,441 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@618f0457{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:30:05,441 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:30:05,441 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cac9c50{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:30:05,441 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b5fa12b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/hadoop.log.dir/,STOPPED} 2024-11-26T10:30:05,442 WARN [BP-1347972047-172.17.0.2-1732616952795 heartbeating to localhost/127.0.0.1:36867 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1347972047-172.17.0.2-1732616952795 (Datanode Uuid 59ed9d1c-efce-4773-8193-f9804574b8bc) service to localhost/127.0.0.1:36867 2024-11-26T10:30:05,443 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/cluster_a4e1dc8b-dc03-fb22-bd40-f93db92a2a9c/data/data3/current/BP-1347972047-172.17.0.2-1732616952795 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:30:05,443 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/cluster_a4e1dc8b-dc03-fb22-bd40-f93db92a2a9c/data/data4/current/BP-1347972047-172.17.0.2-1732616952795 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:30:05,443 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:30:05,445 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b24fbcd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:30:05,445 INFO 
[Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21bca0b2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:30:05,445 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:30:05,446 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59a5f2c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:30:05,446 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e7fac47{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/hadoop.log.dir/,STOPPED} 2024-11-26T10:30:05,447 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-26T10:30:05,447 WARN [BP-1347972047-172.17.0.2-1732616952795 heartbeating to localhost/127.0.0.1:36867 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:30:05,447 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:30:05,447 WARN [BP-1347972047-172.17.0.2-1732616952795 heartbeating to localhost/127.0.0.1:36867 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1347972047-172.17.0.2-1732616952795 (Datanode Uuid a90634e4-8f09-4c0f-be14-7428bceef399) service to localhost/127.0.0.1:36867 2024-11-26T10:30:05,448 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/cluster_a4e1dc8b-dc03-fb22-bd40-f93db92a2a9c/data/data1/current/BP-1347972047-172.17.0.2-1732616952795 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:30:05,448 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/cluster_a4e1dc8b-dc03-fb22-bd40-f93db92a2a9c/data/data2/current/BP-1347972047-172.17.0.2-1732616952795 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:30:05,449 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:30:05,455 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b4e4fbe{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-26T10:30:05,456 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@18baee58{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:30:05,456 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:30:05,456 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40e0483f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:30:05,456 INFO 
[Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@701842fe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/hadoop.log.dir/,STOPPED} 2024-11-26T10:30:05,463 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-26T10:30:05,479 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-26T10:30:05,491 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=209 (was 181) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36867 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36867 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36867 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36867 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36867 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36867 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/94eedbb855cf:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially 
hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:36867 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36867 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36867 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=486 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=86 (was 71) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=6113 (was 6268) 2024-11-26T10:30:05,500 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=209, OpenFileDescriptor=486, MaxFileDescriptor=1048576, SystemLoadAverage=86, ProcessCount=11, AvailableMemoryMB=6114 2024-11-26T10:30:05,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-26T10:30:05,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/hadoop.log.dir so I do NOT create it in target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f 2024-11-26T10:30:05,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac38b9fd-b713-4e67-9d05-935840705b17/hadoop.tmp.dir so I do NOT create it in target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f 2024-11-26T10:30:05,500 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/cluster_aa9225f6-ad12-976e-8571-d24e28864d36, deleteOnExit=true 2024-11-26T10:30:05,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-26T10:30:05,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/test.cache.data in system properties and HBase conf 2024-11-26T10:30:05,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/hadoop.tmp.dir in system properties and HBase conf 2024-11-26T10:30:05,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/hadoop.log.dir in system properties and HBase conf 2024-11-26T10:30:05,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-26T10:30:05,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-26T10:30:05,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-26T10:30:05,501 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-26T10:30:05,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-26T10:30:05,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-26T10:30:05,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-26T10:30:05,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-26T10:30:05,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-26T10:30:05,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-26T10:30:05,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-26T10:30:05,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-26T10:30:05,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-26T10:30:05,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/nfs.dump.dir in system properties and HBase conf 2024-11-26T10:30:05,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/java.io.tmpdir in system properties and HBase conf 2024-11-26T10:30:05,503 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-26T10:30:05,503 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-26T10:30:05,503 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-26T10:30:05,516 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-26T10:30:05,576 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:30:05,580 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:30:05,581 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:30:05,581 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:30:05,581 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-26T10:30:05,582 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:30:05,582 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35e03861{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:30:05,582 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a0fc9ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:30:05,696 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@319ddf98{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/java.io.tmpdir/jetty-localhost-45143-hadoop-hdfs-3_4_1-tests_jar-_-any-16217535551356746506/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-26T10:30:05,697 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@371dfe16{HTTP/1.1, (http/1.1)}{localhost:45143} 2024-11-26T10:30:05,697 INFO [Time-limited test {}] server.Server(415): Started @237844ms 2024-11-26T10:30:05,710 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-26T10:30:05,785 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:30:05,788 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:30:05,789 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:30:05,789 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:30:05,789 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-26T10:30:05,789 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@374d3611{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:30:05,790 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ecf816b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:30:05,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:05,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:05,878 INFO [regionserver/94eedbb855cf:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-26T10:30:05,902 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@444d0b71{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/java.io.tmpdir/jetty-localhost-46067-hadoop-hdfs-3_4_1-tests_jar-_-any-4858597232730172447/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:30:05,903 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3bc081d8{HTTP/1.1, (http/1.1)}{localhost:46067} 2024-11-26T10:30:05,903 INFO [Time-limited test {}] server.Server(415): Started @238050ms 2024-11-26T10:30:05,904 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-26T10:30:05,934 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:30:05,937 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:30:05,939 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:30:05,939 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:30:05,939 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-26T10:30:05,939 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64d2170c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:30:05,939 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41a74ab6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:30:06,019 WARN [Thread-1959 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/cluster_aa9225f6-ad12-976e-8571-d24e28864d36/data/data1/current/BP-493305062-172.17.0.2-1732617005522/current, will proceed with Du for space computation calculation, 2024-11-26T10:30:06,019 WARN [Thread-1960 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/cluster_aa9225f6-ad12-976e-8571-d24e28864d36/data/data2/current/BP-493305062-172.17.0.2-1732617005522/current, will proceed with Du for space computation calculation, 2024-11-26T10:30:06,041 WARN [Thread-1938 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-26T10:30:06,044 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x432b4abeedd2f3a7 with lease ID 0xdee8e37d9fa185: Processing first storage report for DS-2da1853f-3e2f-4802-8906-94f0b5ed8ae9 from datanode DatanodeRegistration(127.0.0.1:44813, datanodeUuid=d5c2a637-0358-4490-8c1b-249e2208c819, infoPort=44661, infoSecurePort=0, ipcPort=44149, storageInfo=lv=-57;cid=testClusterID;nsid=1747845094;c=1732617005522) 2024-11-26T10:30:06,044 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x432b4abeedd2f3a7 with lease ID 0xdee8e37d9fa185: from storage DS-2da1853f-3e2f-4802-8906-94f0b5ed8ae9 node DatanodeRegistration(127.0.0.1:44813, datanodeUuid=d5c2a637-0358-4490-8c1b-249e2208c819, infoPort=44661, infoSecurePort=0, ipcPort=44149, storageInfo=lv=-57;cid=testClusterID;nsid=1747845094;c=1732617005522), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:30:06,044 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x432b4abeedd2f3a7 with lease ID 0xdee8e37d9fa185: Processing first storage report for DS-1945c335-1bdf-44fa-80e0-dd0cbe6dfcf3 from datanode DatanodeRegistration(127.0.0.1:44813, datanodeUuid=d5c2a637-0358-4490-8c1b-249e2208c819, infoPort=44661, infoSecurePort=0, ipcPort=44149, storageInfo=lv=-57;cid=testClusterID;nsid=1747845094;c=1732617005522) 2024-11-26T10:30:06,044 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x432b4abeedd2f3a7 with lease ID 0xdee8e37d9fa185: from storage DS-1945c335-1bdf-44fa-80e0-dd0cbe6dfcf3 node DatanodeRegistration(127.0.0.1:44813, datanodeUuid=d5c2a637-0358-4490-8c1b-249e2208c819, infoPort=44661, infoSecurePort=0, ipcPort=44149, storageInfo=lv=-57;cid=testClusterID;nsid=1747845094;c=1732617005522), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:30:06,071 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@18d1ee92{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/java.io.tmpdir/jetty-localhost-37739-hadoop-hdfs-3_4_1-tests_jar-_-any-12588872108296130418/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:30:06,072 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78512cf7{HTTP/1.1, (http/1.1)}{localhost:37739} 2024-11-26T10:30:06,072 INFO [Time-limited test {}] server.Server(415): Started @238219ms 2024-11-26T10:30:06,073 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-26T10:30:06,170 WARN [Thread-1986 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/cluster_aa9225f6-ad12-976e-8571-d24e28864d36/data/data4/current/BP-493305062-172.17.0.2-1732617005522/current, will proceed with Du for space computation calculation, 2024-11-26T10:30:06,170 WARN [Thread-1985 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/cluster_aa9225f6-ad12-976e-8571-d24e28864d36/data/data3/current/BP-493305062-172.17.0.2-1732617005522/current, will proceed with Du for space computation calculation, 2024-11-26T10:30:06,186 WARN [Thread-1974 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-26T10:30:06,188 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3bc81bacd39e0f42 with lease ID 0xdee8e37d9fa186: Processing first storage report for DS-dc7e9ccf-2452-4968-a79e-2b5703715a3b from datanode DatanodeRegistration(127.0.0.1:33803, datanodeUuid=be912820-3b3c-4149-a752-c348069613fa, infoPort=42675, infoSecurePort=0, ipcPort=41867, storageInfo=lv=-57;cid=testClusterID;nsid=1747845094;c=1732617005522) 2024-11-26T10:30:06,188 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3bc81bacd39e0f42 with lease ID 0xdee8e37d9fa186: from storage DS-dc7e9ccf-2452-4968-a79e-2b5703715a3b node DatanodeRegistration(127.0.0.1:33803, datanodeUuid=be912820-3b3c-4149-a752-c348069613fa, infoPort=42675, infoSecurePort=0, ipcPort=41867, storageInfo=lv=-57;cid=testClusterID;nsid=1747845094;c=1732617005522), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:30:06,189 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3bc81bacd39e0f42 with lease ID 0xdee8e37d9fa186: Processing first storage report for DS-e79af069-70d3-4bbf-a453-cb3e3e2db435 from datanode DatanodeRegistration(127.0.0.1:33803, datanodeUuid=be912820-3b3c-4149-a752-c348069613fa, infoPort=42675, infoSecurePort=0, ipcPort=41867, storageInfo=lv=-57;cid=testClusterID;nsid=1747845094;c=1732617005522) 2024-11-26T10:30:06,189 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3bc81bacd39e0f42 with lease ID 0xdee8e37d9fa186: from storage DS-e79af069-70d3-4bbf-a453-cb3e3e2db435 node DatanodeRegistration(127.0.0.1:33803, datanodeUuid=be912820-3b3c-4149-a752-c348069613fa, infoPort=42675, infoSecurePort=0, ipcPort=41867, storageInfo=lv=-57;cid=testClusterID;nsid=1747845094;c=1732617005522), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-26T10:30:06,194 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f 2024-11-26T10:30:06,197 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/cluster_aa9225f6-ad12-976e-8571-d24e28864d36/zookeeper_0, clientPort=58623, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/cluster_aa9225f6-ad12-976e-8571-d24e28864d36/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/cluster_aa9225f6-ad12-976e-8571-d24e28864d36/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-26T10:30:06,198 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58623 2024-11-26T10:30:06,198 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:30:06,200 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:30:06,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741825_1001 (size=7) 2024-11-26T10:30:06,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741825_1001 (size=7) 2024-11-26T10:30:06,210 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6 with version=8 2024-11-26T10:30:06,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/hbase-staging 2024-11-26T10:30:06,212 INFO [Time-limited test {}] client.ConnectionUtils(128): master/94eedbb855cf:0 server-side Connection retries=45 2024-11-26T10:30:06,213 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:30:06,213 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-26T10:30:06,213 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-26T10:30:06,213 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:30:06,213 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-26T10:30:06,213 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-26T10:30:06,213 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-26T10:30:06,214 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34641 2024-11-26T10:30:06,215 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34641 connecting to ZooKeeper ensemble=127.0.0.1:58623 2024-11-26T10:30:06,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:346410x0, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-26T10:30:06,223 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34641-0x10153d357040000 connected 2024-11-26T10:30:06,239 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:30:06,241 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:30:06,244 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:30:06,244 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6, hbase.cluster.distributed=false 2024-11-26T10:30:06,246 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-26T10:30:06,246 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34641 2024-11-26T10:30:06,247 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34641 2024-11-26T10:30:06,247 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34641 2024-11-26T10:30:06,247 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34641 2024-11-26T10:30:06,247 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34641 2024-11-26T10:30:06,271 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/94eedbb855cf:0 server-side Connection retries=45 2024-11-26T10:30:06,271 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:30:06,271 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-26T10:30:06,271 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-26T10:30:06,271 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:30:06,271 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-26T10:30:06,271 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-26T10:30:06,272 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-26T10:30:06,272 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46389 2024-11-26T10:30:06,274 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46389 connecting to ZooKeeper ensemble=127.0.0.1:58623 2024-11-26T10:30:06,275 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:30:06,277 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:30:06,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:463890x0, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-26T10:30:06,281 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:463890x0, quorum=127.0.0.1:58623, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:30:06,281 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46389-0x10153d357040001 connected 2024-11-26T10:30:06,281 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-26T10:30:06,282 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-26T10:30:06,282 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-26T10:30:06,283 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-26T10:30:06,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46389 2024-11-26T10:30:06,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46389 2024-11-26T10:30:06,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46389 2024-11-26T10:30:06,285 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46389 2024-11-26T10:30:06,285 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46389 2024-11-26T10:30:06,296 
DEBUG [M:0;94eedbb855cf:34641 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;94eedbb855cf:34641 2024-11-26T10:30:06,297 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/94eedbb855cf,34641,1732617006212 2024-11-26T10:30:06,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:30:06,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:30:06,298 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/94eedbb855cf,34641,1732617006212 2024-11-26T10:30:06,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-26T10:30:06,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:30:06,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:30:06,300 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-26T10:30:06,301 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/94eedbb855cf,34641,1732617006212 from backup master directory 2024-11-26T10:30:06,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/94eedbb855cf,34641,1732617006212 2024-11-26T10:30:06,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:30:06,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:30:06,303 WARN [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
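
The repeated "Received ZooKeeper Event, type=..., state=SyncConnected, path=..." lines above are HBase's ZKWatcher relaying callbacks from the underlying ZooKeeper client as the master registers its znodes. As a minimal sketch of that callback mechanism, here is the same pattern expressed with the plain Apache ZooKeeper client; the ensemble address 127.0.0.1:58623 is the client port reported by MiniZooKeeperCluster earlier, while everything else (class name, watched path usage) is illustrative only, not HBase's actual ZKWatcher code.

    // Illustrative sketch using the plain Apache ZooKeeper client; not part of the captured test.
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkEventSketch {
      public static void main(String[] args) throws Exception {
        // 127.0.0.1:58623 is the client port reported by MiniZooKeeperCluster above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:58623", 30000, (WatchedEvent event) ->
            // Mirrors the "Received ZooKeeper Event, type=..., state=..., path=..." lines.
            System.out.println("type=" + event.getType()
                + ", state=" + event.getState()
                + ", path=" + event.getPath()));
        // Setting a watch on a znode such as /hbase/master produces NodeCreated /
        // NodeChildrenChanged callbacks like the ones logged by ZKWatcher.
        zk.exists("/hbase/master", true);
        zk.close();
      }
    }
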
2024-11-26T10:30:06,303 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=94eedbb855cf,34641,1732617006212 2024-11-26T10:30:06,308 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/hbase.id] with ID: 1e581fe7-71de-4d5b-bd95-f4f891aacbbc 2024-11-26T10:30:06,308 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/.tmp/hbase.id 2024-11-26T10:30:06,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741826_1002 (size=42) 2024-11-26T10:30:06,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741826_1002 (size=42) 2024-11-26T10:30:06,314 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/.tmp/hbase.id]:[hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/hbase.id] 2024-11-26T10:30:06,325 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:30:06,325 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-26T10:30:06,327 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
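
The two FSUtils lines above describe the cluster ID being written to a temporary path (.tmp/hbase.id) and then moved to its final location, so readers never see a half-written file. A minimal sketch of that temp-write-then-rename pattern with the Hadoop FileSystem API follows; it reuses the hdfs://localhost:32975 root and the cluster ID value from the log for concreteness, but it is illustrative only and not the actual FSUtils implementation.

    // Illustrative sketch of the temp-write-then-rename pattern described above;
    // this is not the actual FSUtils code.
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Root taken from the log; any HDFS directory works for the pattern itself.
        Path root = new Path("hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6");
        FileSystem fs = root.getFileSystem(conf);
        Path tmp = new Path(root, ".tmp/hbase.id");
        Path target = new Path(root, "hbase.id");
        // 1. Write the content to a temporary location...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("1e581fe7-71de-4d5b-bd95-f4f891aacbbc".getBytes(StandardCharsets.UTF_8));
        }
        // 2. ...then rename it into place as a single atomic-looking step.
        fs.rename(tmp, target);
      }
    }
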
2024-11-26T10:30:06,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:30:06,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:30:06,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741827_1003 (size=196) 2024-11-26T10:30:06,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741827_1003 (size=196) 2024-11-26T10:30:06,336 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:30:06,337 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-26T10:30:06,338 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:30:06,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741828_1004 (size=1189) 2024-11-26T10:30:06,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741828_1004 (size=1189) 2024-11-26T10:30:06,345 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store 2024-11-26T10:30:06,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741829_1005 (size=34) 2024-11-26T10:30:06,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741829_1005 (size=34) 2024-11-26T10:30:06,351 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:30:06,351 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-26T10:30:06,351 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:30:06,351 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:30:06,351 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-26T10:30:06,351 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:30:06,351 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
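
The descriptor dump above shows the attributes of master:store's 'info' family: 3 versions, in-memory, ROW_INDEX_V1 block encoding, ROWCOL bloom filter, 8 KB block size. As a point of reference, here is a minimal sketch of declaring a family with those same attributes through the public ColumnFamilyDescriptorBuilder/TableDescriptorBuilder API; master:store itself is internal to the master, so the table name below is hypothetical and the snippet only illustrates the attributes, not how MasterRegion builds its descriptor.

    // Illustrative sketch of a column family with the attributes logged for master:store's
    // 'info' family; not how MasterRegion actually constructs it.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InfoFamilySketch {
      public static void main(String[] args) {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                   // VERSIONS => '3'
            .setInMemory(true)                                   // IN_MEMORY => 'true'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
            .setBlocksize(8 * 1024)                              // BLOCKSIZE => '8192 B (8KB)'
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example"))            // hypothetical table name
            .setColumnFamily(info)
            .build();
        System.out.println(td);
      }
    }
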
2024-11-26T10:30:06,351 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732617006351Disabling compacts and flushes for region at 1732617006351Disabling writes for close at 1732617006351Writing region close event to WAL at 1732617006351Closed at 1732617006351 2024-11-26T10:30:06,352 WARN [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/.initializing 2024-11-26T10:30:06,352 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/WALs/94eedbb855cf,34641,1732617006212 2024-11-26T10:30:06,355 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C34641%2C1732617006212, suffix=, logDir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/WALs/94eedbb855cf,34641,1732617006212, archiveDir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/oldWALs, maxLogs=10 2024-11-26T10:30:06,355 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C34641%2C1732617006212.1732617006355 2024-11-26T10:30:06,359 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/WALs/94eedbb855cf,34641,1732617006212/94eedbb855cf%2C34641%2C1732617006212.1732617006355 2024-11-26T10:30:06,360 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44661:44661),(127.0.0.1/127.0.0.1:42675:42675)] 2024-11-26T10:30:06,361 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:30:06,361 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:30:06,361 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:30:06,361 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:30:06,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:30:06,363 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-26T10:30:06,363 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:06,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:30:06,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:30:06,365 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-26T10:30:06,365 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:06,365 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:30:06,365 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:30:06,366 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-26T10:30:06,366 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:06,367 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:30:06,367 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:30:06,368 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-26T10:30:06,368 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:06,368 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:30:06,368 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:30:06,369 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:30:06,369 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:30:06,370 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:30:06,370 DEBUG [master/94eedbb855cf:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:30:06,371 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-26T10:30:06,372 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:30:06,374 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:30:06,374 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=878629, jitterRate=0.11723460257053375}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-26T10:30:06,375 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732617006361Initializing all the Stores at 1732617006362 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732617006362Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732617006362Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732617006362Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732617006362Cleaning up temporary data from old regions at 1732617006370 (+8 ms)Region opened successfully at 1732617006375 (+5 ms) 2024-11-26T10:30:06,376 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-26T10:30:06,379 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dc3dd5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=94eedbb855cf/172.17.0.2:0 2024-11-26T10:30:06,380 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-26T10:30:06,380 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-26T10:30:06,380 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-26T10:30:06,380 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-26T10:30:06,380 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-26T10:30:06,381 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-26T10:30:06,381 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-26T10:30:06,383 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-26T10:30:06,384 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-26T10:30:06,386 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-26T10:30:06,386 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-26T10:30:06,387 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-26T10:30:06,387 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-26T10:30:06,388 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-26T10:30:06,389 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-26T10:30:06,391 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-26T10:30:06,392 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-26T10:30:06,393 DEBUG 
[master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-26T10:30:06,395 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-26T10:30:06,396 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-26T10:30:06,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-26T10:30:06,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-26T10:30:06,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:30:06,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:30:06,398 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=94eedbb855cf,34641,1732617006212, sessionid=0x10153d357040000, setting cluster-up flag (Was=false) 2024-11-26T10:30:06,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:30:06,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:30:06,408 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-26T10:30:06,409 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=94eedbb855cf,34641,1732617006212 2024-11-26T10:30:06,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:30:06,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:30:06,418 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-26T10:30:06,419 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=94eedbb855cf,34641,1732617006212 2024-11-26T10:30:06,420 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-26T10:30:06,422 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-26T10:30:06,422 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-26T10:30:06,422 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-26T10:30:06,423 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 94eedbb855cf,34641,1732617006212 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-26T10:30:06,424 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:30:06,424 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:30:06,424 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:30:06,424 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:30:06,424 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/94eedbb855cf:0, corePoolSize=10, maxPoolSize=10 2024-11-26T10:30:06,424 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:30:06,424 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/94eedbb855cf:0, corePoolSize=2, maxPoolSize=2 2024-11-26T10:30:06,424 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/94eedbb855cf:0, corePoolSize=1, 
maxPoolSize=1 2024-11-26T10:30:06,427 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732617036427 2024-11-26T10:30:06,427 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-26T10:30:06,428 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-26T10:30:06,428 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-26T10:30:06,428 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-26T10:30:06,428 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-26T10:30:06,428 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-26T10:30:06,428 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-26T10:30:06,428 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-26T10:30:06,428 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-26T10:30:06,428 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-26T10:30:06,428 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:30:06,429 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-26T10:30:06,429 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-26T10:30:06,429 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-26T10:30:06,430 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:06,430 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-26T10:30:06,432 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732617006429,5,FailOnTimeoutGroup] 2024-11-26T10:30:06,436 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732617006432,5,FailOnTimeoutGroup] 2024-11-26T10:30:06,436 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-26T10:30:06,436 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-26T10:30:06,436 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-26T10:30:06,436 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
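
The HMaster line above notes that reopening regions with a very high storeFileRefCount stays disabled unless hbase.regions.recovery.store.file.ref.count is set to a value greater than 0. A one-line illustrative sketch of setting that key follows; the key name is quoted from the log, while the value 3 is an arbitrary example threshold, not a recommendation.

    // Illustrative only: enabling the threshold named in the HMaster line above.
    import org.apache.hadoop.conf.Configuration;

    public class StoreFileRefCountSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3); // example value
        System.out.println(conf.get("hbase.regions.recovery.store.file.ref.count"));
      }
    }
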
2024-11-26T10:30:06,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741831_1007 (size=1321) 2024-11-26T10:30:06,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741831_1007 (size=1321) 2024-11-26T10:30:06,444 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-26T10:30:06,444 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6 2024-11-26T10:30:06,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741832_1008 (size=32) 2024-11-26T10:30:06,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741832_1008 (size=32) 2024-11-26T10:30:06,452 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:30:06,453 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-26T10:30:06,455 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-26T10:30:06,455 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:06,455 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:30:06,456 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-26T10:30:06,457 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-26T10:30:06,457 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:06,457 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:30:06,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-26T10:30:06,459 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-26T10:30:06,459 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:06,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:30:06,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-26T10:30:06,460 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-26T10:30:06,460 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:06,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:30:06,461 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-26T10:30:06,461 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740 2024-11-26T10:30:06,462 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740 2024-11-26T10:30:06,463 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-26T10:30:06,463 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-26T10:30:06,463 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
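Editor's note: the CompactionConfiguration lines above print the resolved compaction settings per column family. To the best of my knowledge these numbers come from the stock hbase-site keys read in the sketch below; the key-to-value mapping is an assumption based on default HBase, not on this test's own configuration files.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigPeek {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Usual knobs behind the numbers printed by CompactionConfiguration
    // (defaults shown as fallbacks match the values in this log).
    int minFiles  = conf.getInt("hbase.hstore.compaction.min", 3);
    int maxFiles  = conf.getInt("hbase.hstore.compaction.max", 10);
    float ratio   = conf.getFloat("hbase.hstore.compaction.ratio", 1.2F);
    float offPeak = conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);
    long majorMs  = conf.getLong("hbase.hregion.majorcompaction", 604800000L);
    float jitter  = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.5F);
    System.out.printf("files [%d, %d) ratio %.2f offPeak %.2f major %d ms jitter %.2f%n",
        minFiles, maxFiles, ratio, offPeak, majorMs, jitter);
  }
}
```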
2024-11-26T10:30:06,464 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-26T10:30:06,466 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:30:06,466 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=803701, jitterRate=0.021959081292152405}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:30:06,467 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732617006452Initializing all the Stores at 1732617006453 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732617006453Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732617006453Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732617006453Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732617006453Cleaning up temporary data from old regions at 1732617006463 (+10 ms)Region opened successfully at 1732617006467 (+4 ms) 2024-11-26T10:30:06,467 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-26T10:30:06,467 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-26T10:30:06,467 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-26T10:30:06,467 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-26T10:30:06,467 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-26T10:30:06,468 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-26T10:30:06,468 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732617006467Disabling compacts and flushes for region at 1732617006467Disabling writes for close at 1732617006467Writing region 
close event to WAL at 1732617006468 (+1 ms)Closed at 1732617006468 2024-11-26T10:30:06,469 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:30:06,469 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-26T10:30:06,469 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-26T10:30:06,470 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-26T10:30:06,471 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-26T10:30:06,487 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.HRegionServer(746): ClusterId : 1e581fe7-71de-4d5b-bd95-f4f891aacbbc 2024-11-26T10:30:06,487 DEBUG [RS:0;94eedbb855cf:46389 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-26T10:30:06,489 DEBUG [RS:0;94eedbb855cf:46389 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-26T10:30:06,489 DEBUG [RS:0;94eedbb855cf:46389 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-26T10:30:06,490 DEBUG [RS:0;94eedbb855cf:46389 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-26T10:30:06,491 DEBUG [RS:0;94eedbb855cf:46389 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d7cefa6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=94eedbb855cf/172.17.0.2:0 2024-11-26T10:30:06,502 DEBUG [RS:0;94eedbb855cf:46389 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;94eedbb855cf:46389 2024-11-26T10:30:06,502 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-26T10:30:06,502 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-26T10:30:06,502 DEBUG [RS:0;94eedbb855cf:46389 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-26T10:30:06,503 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.HRegionServer(2659): reportForDuty to master=94eedbb855cf,34641,1732617006212 with port=46389, startcode=1732617006271 2024-11-26T10:30:06,503 DEBUG [RS:0;94eedbb855cf:46389 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-26T10:30:06,505 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45937, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-26T10:30:06,506 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34641 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 94eedbb855cf,46389,1732617006271 2024-11-26T10:30:06,506 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34641 {}] master.ServerManager(517): Registering regionserver=94eedbb855cf,46389,1732617006271 2024-11-26T10:30:06,508 DEBUG [RS:0;94eedbb855cf:46389 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6 2024-11-26T10:30:06,508 DEBUG [RS:0;94eedbb855cf:46389 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:32975 2024-11-26T10:30:06,508 DEBUG [RS:0;94eedbb855cf:46389 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-26T10:30:06,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-26T10:30:06,512 DEBUG [RS:0;94eedbb855cf:46389 {}] zookeeper.ZKUtil(111): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/94eedbb855cf,46389,1732617006271 2024-11-26T10:30:06,512 WARN [RS:0;94eedbb855cf:46389 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-26T10:30:06,512 INFO [RS:0;94eedbb855cf:46389 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:30:06,512 DEBUG [RS:0;94eedbb855cf:46389 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/WALs/94eedbb855cf,46389,1732617006271 2024-11-26T10:30:06,512 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [94eedbb855cf,46389,1732617006271] 2024-11-26T10:30:06,516 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-26T10:30:06,518 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-26T10:30:06,519 INFO [RS:0;94eedbb855cf:46389 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-26T10:30:06,519 INFO [RS:0;94eedbb855cf:46389 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
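Editor's note: the MemStoreFlusher entry above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. Under stock HBase sizing the upper limit is heap * hbase.regionserver.global.memstore.size (default 0.4) and the low mark is that limit * hbase.regionserver.global.memstore.size.lower.limit (default 0.95); 836/880 is 0.95, which is consistent. The key names and defaults are assumptions from stock HBase; the tiny sketch below just shows the arithmetic.

```java
public class MemStoreLimitMath {
  public static void main(String[] args) {
    // Assumed stock sizing: lower mark = upper limit * 0.95.
    double upperMb = 880.0;                 // value reported in the log
    double lowerMb = upperMb * 0.95;        // ~836 MB, matching the log
    System.out.printf("upper=%.0f MB lower=%.0f MB%n", upperMb, lowerMb);
  }
}
```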
2024-11-26T10:30:06,519 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-26T10:30:06,520 INFO [RS:0;94eedbb855cf:46389 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-26T10:30:06,520 INFO [RS:0;94eedbb855cf:46389 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-26T10:30:06,520 DEBUG [RS:0;94eedbb855cf:46389 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:30:06,520 DEBUG [RS:0;94eedbb855cf:46389 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:30:06,520 DEBUG [RS:0;94eedbb855cf:46389 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:30:06,520 DEBUG [RS:0;94eedbb855cf:46389 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:30:06,521 DEBUG [RS:0;94eedbb855cf:46389 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:30:06,521 DEBUG [RS:0;94eedbb855cf:46389 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/94eedbb855cf:0, corePoolSize=2, maxPoolSize=2 2024-11-26T10:30:06,521 DEBUG [RS:0;94eedbb855cf:46389 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:30:06,521 DEBUG [RS:0;94eedbb855cf:46389 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:30:06,521 DEBUG [RS:0;94eedbb855cf:46389 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:30:06,521 DEBUG [RS:0;94eedbb855cf:46389 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:30:06,521 DEBUG [RS:0;94eedbb855cf:46389 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:30:06,521 DEBUG [RS:0;94eedbb855cf:46389 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:30:06,521 DEBUG [RS:0;94eedbb855cf:46389 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/94eedbb855cf:0, corePoolSize=3, maxPoolSize=3 2024-11-26T10:30:06,521 DEBUG [RS:0;94eedbb855cf:46389 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0, corePoolSize=3, maxPoolSize=3 2024-11-26T10:30:06,524 INFO [RS:0;94eedbb855cf:46389 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-26T10:30:06,524 INFO [RS:0;94eedbb855cf:46389 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-26T10:30:06,524 INFO [RS:0;94eedbb855cf:46389 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:30:06,524 INFO [RS:0;94eedbb855cf:46389 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-26T10:30:06,524 INFO [RS:0;94eedbb855cf:46389 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-26T10:30:06,524 INFO [RS:0;94eedbb855cf:46389 {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,46389,1732617006271-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-26T10:30:06,548 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-26T10:30:06,548 INFO [RS:0;94eedbb855cf:46389 {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,46389,1732617006271-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:30:06,548 INFO [RS:0;94eedbb855cf:46389 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:30:06,549 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.Replication(171): 94eedbb855cf,46389,1732617006271 started 2024-11-26T10:30:06,569 INFO [RS:0;94eedbb855cf:46389 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:30:06,569 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.HRegionServer(1482): Serving as 94eedbb855cf,46389,1732617006271, RpcServer on 94eedbb855cf/172.17.0.2:46389, sessionid=0x10153d357040001 2024-11-26T10:30:06,570 DEBUG [RS:0;94eedbb855cf:46389 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-26T10:30:06,570 DEBUG [RS:0;94eedbb855cf:46389 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 94eedbb855cf,46389,1732617006271 2024-11-26T10:30:06,570 DEBUG [RS:0;94eedbb855cf:46389 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '94eedbb855cf,46389,1732617006271' 2024-11-26T10:30:06,570 DEBUG [RS:0;94eedbb855cf:46389 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-26T10:30:06,570 DEBUG [RS:0;94eedbb855cf:46389 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-26T10:30:06,571 DEBUG [RS:0;94eedbb855cf:46389 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-26T10:30:06,571 DEBUG [RS:0;94eedbb855cf:46389 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-26T10:30:06,571 DEBUG [RS:0;94eedbb855cf:46389 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 94eedbb855cf,46389,1732617006271 2024-11-26T10:30:06,571 DEBUG [RS:0;94eedbb855cf:46389 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '94eedbb855cf,46389,1732617006271' 2024-11-26T10:30:06,571 DEBUG [RS:0;94eedbb855cf:46389 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-26T10:30:06,571 DEBUG 
[RS:0;94eedbb855cf:46389 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-26T10:30:06,572 DEBUG [RS:0;94eedbb855cf:46389 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-26T10:30:06,572 INFO [RS:0;94eedbb855cf:46389 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-26T10:30:06,572 INFO [RS:0;94eedbb855cf:46389 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-26T10:30:06,621 WARN [94eedbb855cf:34641 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-26T10:30:06,674 INFO [RS:0;94eedbb855cf:46389 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C46389%2C1732617006271, suffix=, logDir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/WALs/94eedbb855cf,46389,1732617006271, archiveDir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/oldWALs, maxLogs=32 2024-11-26T10:30:06,674 INFO [RS:0;94eedbb855cf:46389 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C46389%2C1732617006271.1732617006674 2024-11-26T10:30:06,680 INFO [RS:0;94eedbb855cf:46389 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/WALs/94eedbb855cf,46389,1732617006271/94eedbb855cf%2C46389%2C1732617006271.1732617006674 2024-11-26T10:30:06,682 DEBUG [RS:0;94eedbb855cf:46389 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44661:44661),(127.0.0.1/127.0.0.1:42675:42675)] 2024-11-26T10:30:06,872 DEBUG [94eedbb855cf:34641 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-26T10:30:06,872 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=94eedbb855cf,46389,1732617006271 2024-11-26T10:30:06,874 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 94eedbb855cf,46389,1732617006271, state=OPENING 2024-11-26T10:30:06,875 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-26T10:30:06,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:30:06,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:30:06,877 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:30:06,877 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-26T10:30:06,877 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:30:06,877 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=94eedbb855cf,46389,1732617006271}]
2024-11-26T10:30:06,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-26T10:30:06,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-26T10:30:07,030 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-26T10:30:07,032 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33799, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-26T10:30:07,035 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-26T10:30:07,036 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-26T10:30:07,037 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C46389%2C1732617006271.meta, suffix=.meta, logDir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/WALs/94eedbb855cf,46389,1732617006271, archiveDir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/oldWALs, maxLogs=32
2024-11-26T10:30:07,037 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C46389%2C1732617006271.meta.1732617007037.meta
2024-11-26T10:30:07,045 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/WALs/94eedbb855cf,46389,1732617006271/94eedbb855cf%2C46389%2C1732617006271.meta.1732617007037.meta 2024-11-26T10:30:07,046 DEBUG
[RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44661:44661),(127.0.0.1/127.0.0.1:42675:42675)] 2024-11-26T10:30:07,047 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:30:07,048 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-26T10:30:07,048 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-26T10:30:07,048 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-26T10:30:07,048 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-26T10:30:07,048 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:30:07,048 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-26T10:30:07,048 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-26T10:30:07,050 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-26T10:30:07,050 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-26T10:30:07,051 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:07,051 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:30:07,051 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-26T10:30:07,052 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-26T10:30:07,052 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:07,052 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:30:07,052 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-26T10:30:07,053 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-26T10:30:07,053 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:07,053 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:30:07,053 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-26T10:30:07,054 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-26T10:30:07,054 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:07,054 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:30:07,054 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-26T10:30:07,055 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740 2024-11-26T10:30:07,056 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740 2024-11-26T10:30:07,057 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-26T10:30:07,057 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-26T10:30:07,058 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
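Editor's note: the FlushLargeStoresPolicy entry above falls back to "region memstore flush heap size / number of column families" because no per-family lower bound is set on hbase:meta. The meta table here has four families (info, ns, rep_barrier, table), and the policy later reports flushSizeLowerBound=16777216 (16 MB). The 64 MB flush size in the sketch below is an assumption, chosen only because it is the value that makes the arithmetic land on the logged 16.0 M.

```java
public class FlushLowerBoundMath {
  public static void main(String[] args) {
    long flushSizeBytes = 64L * 1024 * 1024;   // assumed region memstore flush size
    int families = 4;                          // info, ns, rep_barrier, table
    System.out.println(flushSizeBytes / families);  // 16777216 == 16 MB
  }
}
```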
2024-11-26T10:30:07,059 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-26T10:30:07,060 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=860052, jitterRate=0.09361386299133301}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:30:07,060 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-26T10:30:07,061 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732617007048Writing region info on filesystem at 1732617007048Initializing all the Stores at 1732617007049 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732617007049Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732617007050 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732617007050Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732617007050Cleaning up temporary data from old regions at 1732617007057 (+7 ms)Running coprocessor post-open hooks at 1732617007060 (+3 ms)Region opened successfully at 1732617007061 (+1 ms) 2024-11-26T10:30:07,062 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732617007030 2024-11-26T10:30:07,065 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-26T10:30:07,065 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-26T10:30:07,065 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=94eedbb855cf,46389,1732617006271 2024-11-26T10:30:07,066 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 94eedbb855cf,46389,1732617006271, state=OPEN 2024-11-26T10:30:07,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-26T10:30:07,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-26T10:30:07,074 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=94eedbb855cf,46389,1732617006271 2024-11-26T10:30:07,074 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:30:07,074 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:30:07,076 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-26T10:30:07,076 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=94eedbb855cf,46389,1732617006271 in 197 msec 2024-11-26T10:30:07,079 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-26T10:30:07,079 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 607 msec 2024-11-26T10:30:07,079 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:30:07,079 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-26T10:30:07,081 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-26T10:30:07,081 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=94eedbb855cf,46389,1732617006271, seqNum=-1] 2024-11-26T10:30:07,081 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:30:07,082 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34627, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:30:07,087 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 665 msec 2024-11-26T10:30:07,087 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732617007087, completionTime=-1 2024-11-26T10:30:07,087 INFO 
[master/94eedbb855cf:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-26T10:30:07,087 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-26T10:30:07,089 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-26T10:30:07,089 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732617067089 2024-11-26T10:30:07,089 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732617127089 2024-11-26T10:30:07,089 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-26T10:30:07,090 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,34641,1732617006212-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:30:07,090 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,34641,1732617006212-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:30:07,090 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,34641,1732617006212-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:30:07,090 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-94eedbb855cf:34641, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:30:07,090 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-26T10:30:07,090 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-26T10:30:07,091 DEBUG [master/94eedbb855cf:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-26T10:30:07,094 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.791sec 2024-11-26T10:30:07,094 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-26T10:30:07,094 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-26T10:30:07,094 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-26T10:30:07,094 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
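Editor's note: the entries that follow show the test's client side fetching the cluster id and the meta region location through the connection registry. As a rough, non-authoritative sketch of what such a client does (this is not the test's actual code; the quorum address and port are taken from the ZooKeeper strings earlier in this log and assumed reachable):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientConnectSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Quorum seen in the log: 127.0.0.1:58623 (assumed reachable here).
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "58623");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Roughly corresponds to the registry/cluster-id lookups logged below.
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}
```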
2024-11-26T10:30:07,094 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-26T10:30:07,094 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,34641,1732617006212-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-26T10:30:07,094 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,34641,1732617006212-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-26T10:30:07,096 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-26T10:30:07,096 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-26T10:30:07,096 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,34641,1732617006212-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:30:07,187 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ea28eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:30:07,187 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 94eedbb855cf,34641,-1 for getting cluster id 2024-11-26T10:30:07,188 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-26T10:30:07,189 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '1e581fe7-71de-4d5b-bd95-f4f891aacbbc' 2024-11-26T10:30:07,189 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-26T10:30:07,189 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "1e581fe7-71de-4d5b-bd95-f4f891aacbbc" 2024-11-26T10:30:07,190 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69b730f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:30:07,190 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [94eedbb855cf,34641,-1] 2024-11-26T10:30:07,190 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-26T10:30:07,190 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:30:07,191 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54224, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-26T10:30:07,192 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42d059d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:30:07,192 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-26T10:30:07,193 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=94eedbb855cf,46389,1732617006271, seqNum=-1] 2024-11-26T10:30:07,193 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:30:07,194 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33544, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:30:07,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=94eedbb855cf,34641,1732617006212 2024-11-26T10:30:07,196 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:30:07,198 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-26T10:30:07,198 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-26T10:30:07,199 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 94eedbb855cf,34641,1732617006212 2024-11-26T10:30:07,199 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@102d6549 2024-11-26T10:30:07,199 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-26T10:30:07,200 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54240, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-26T10:30:07,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34641 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-26T10:30:07,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34641 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
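The entries above show the test client connecting to the fresh mini cluster, switching the balancer off ("set balanceSwitch=false") and submitting a table with deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192), which is why TableDescriptorChecker warns about over-splitting and frequent flushing. A rough sketch of the equivalent client-side calls, assuming the standard Admin/TableDescriptorBuilder API rather than the test's own HBaseTestingUtil helpers:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateSmallRegionTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // would point at the mini cluster in a test
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {

      // Matches "set balanceSwitch=false" above: stop the balancer so the
      // test controls region placement itself.
      admin.balancerSwitch(false, true);

      // Deliberately tiny limits (the same 786432 / 8192 values that
      // TableDescriptorChecker warns about) so the test can force frequent
      // flushes, splits and WAL rolls.
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
              .setMaxFileSize(786432L)          // per-table hbase.hregion.max.filesize
              .setMemStoreFlushSize(8192L)      // per-table hbase.hregion.memstore.flush.size
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)
                  .build());

      admin.createTable(table.build());
    }
  }
}
```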
2024-11-26T10:30:07,201 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34641 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:30:07,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34641 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-26T10:30:07,204 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-26T10:30:07,204 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:07,204 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34641 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-26T10:30:07,205 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-26T10:30:07,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34641 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-26T10:30:07,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741835_1011 (size=381) 2024-11-26T10:30:07,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741835_1011 (size=381) 2024-11-26T10:30:07,216 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d718d021ae6be4df36112e62afd41f60, NAME => 'TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6 2024-11-26T10:30:07,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741836_1012 (size=64) 2024-11-26T10:30:07,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741836_1012 (size=64) 2024-11-26T10:30:07,222 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:30:07,222 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing d718d021ae6be4df36112e62afd41f60, disabling compactions & flushes 2024-11-26T10:30:07,222 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. 2024-11-26T10:30:07,222 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. 2024-11-26T10:30:07,222 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. after waiting 0 ms 2024-11-26T10:30:07,222 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. 2024-11-26T10:30:07,222 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. 2024-11-26T10:30:07,223 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for d718d021ae6be4df36112e62afd41f60: Waiting for close lock at 1732617007222Disabling compacts and flushes for region at 1732617007222Disabling writes for close at 1732617007222Writing region close event to WAL at 1732617007222Closed at 1732617007222 2024-11-26T10:30:07,224 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-26T10:30:07,224 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732617007224"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732617007224"}]},"ts":"1732617007224"} 2024-11-26T10:30:07,227 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
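The CreateTableProcedure entries above write the new region's info:regioninfo and state columns into hbase:meta ("Added 1 regions to meta"). One way a client can observe the result of that Put is through the region locator; the sketch below assumes a live connection to this cluster, and only the table name is taken from the log.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionFromMeta {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      // Each HRegionLocation is backed by the info:regioninfo / info:server
      // columns that the CreateTableProcedure just wrote into hbase:meta.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
```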
2024-11-26T10:30:07,228 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-26T10:30:07,228 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617007228"}]},"ts":"1732617007228"} 2024-11-26T10:30:07,231 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-26T10:30:07,231 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d718d021ae6be4df36112e62afd41f60, ASSIGN}] 2024-11-26T10:30:07,232 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d718d021ae6be4df36112e62afd41f60, ASSIGN 2024-11-26T10:30:07,233 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d718d021ae6be4df36112e62afd41f60, ASSIGN; state=OFFLINE, location=94eedbb855cf,46389,1732617006271; forceNewPlan=false, retain=false 2024-11-26T10:30:07,384 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d718d021ae6be4df36112e62afd41f60, regionState=OPENING, regionLocation=94eedbb855cf,46389,1732617006271 2024-11-26T10:30:07,387 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d718d021ae6be4df36112e62afd41f60, ASSIGN because future has completed 2024-11-26T10:30:07,387 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d718d021ae6be4df36112e62afd41f60, server=94eedbb855cf,46389,1732617006271}] 2024-11-26T10:30:07,543 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. 
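The TransitRegionStateProcedure and OpenRegionProcedure entries above move the new region from OFFLINE through OPENING on 94eedbb855cf,46389. Until that transition completes the table cannot serve requests, so callers typically block on availability first. The helper below is an illustrative polling sketch against the public Admin API, not the mechanism the test itself uses (the create-table call already returns only once the procedure finishes).

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;

public final class WaitForAssignment {
  // Illustrative helper: poll until every region of the table is assigned and open.
  public static void waitUntilAvailable(Admin admin, TableName table) throws Exception {
    while (!admin.isTableAvailable(table)) {
      Thread.sleep(100);   // region still OFFLINE/OPENING; retry shortly
    }
    // The ASSIGN procedure has finished; the region now shows as OPEN in hbase:meta.
    for (RegionInfo r : admin.getRegions(table)) {
      System.out.println("open region: " + r.getRegionNameAsString());
    }
  }
}
```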
2024-11-26T10:30:07,543 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d718d021ae6be4df36112e62afd41f60, NAME => 'TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:30:07,544 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:07,544 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:30:07,544 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:07,544 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:07,545 INFO [StoreOpener-d718d021ae6be4df36112e62afd41f60-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:07,546 INFO [StoreOpener-d718d021ae6be4df36112e62afd41f60-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d718d021ae6be4df36112e62afd41f60 columnFamilyName info 2024-11-26T10:30:07,546 DEBUG [StoreOpener-d718d021ae6be4df36112e62afd41f60-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:07,547 INFO [StoreOpener-d718d021ae6be4df36112e62afd41f60-1 {}] regionserver.HStore(327): Store=d718d021ae6be4df36112e62afd41f60/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:30:07,547 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:07,548 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:07,548 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:07,548 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:07,548 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:07,550 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:07,551 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:30:07,552 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d718d021ae6be4df36112e62afd41f60; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=884331, jitterRate=0.1244850903749466}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-26T10:30:07,552 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:07,552 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d718d021ae6be4df36112e62afd41f60: Running coprocessor pre-open hook at 1732617007544Writing region info on filesystem at 1732617007544Initializing all the Stores at 1732617007545 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732617007545Cleaning up temporary data from old regions at 1732617007548 (+3 ms)Running coprocessor post-open hooks at 1732617007552 (+4 ms)Region opened successfully at 1732617007552 2024-11-26T10:30:07,554 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60., pid=6, masterSystemTime=1732617007540 2024-11-26T10:30:07,556 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. 
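The region-open entry above reports a SteppingSplitPolicy whose underlying ConstantSizeRegionSplitPolicy has desiredMaxFileSize=884331 and jitterRate=0.1244850903749466. Those numbers line up with the table's MAX_FILESIZE of 786432 seen earlier: the policy adds a random jitter of roughly maxFileSize * jitterRate to the configured maximum. A tiny arithmetic check (formula paraphrased from the logged values, not quoted from HBase source):

```java
public class SplitSizeJitter {
  public static void main(String[] args) {
    long configuredMaxFileSize = 786_432L;      // table MAX_FILESIZE from the log
    double jitterRate = 0.1244850903749466;     // reported by ConstantSizeRegionSplitPolicy

    long desiredMaxFileSize =
        configuredMaxFileSize + (long) (configuredMaxFileSize * jitterRate);

    // Prints 884331, matching "desiredMaxFileSize=884331" in the region-open entry.
    System.out.println(desiredMaxFileSize);
  }
}
```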
2024-11-26T10:30:07,556 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. 2024-11-26T10:30:07,556 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d718d021ae6be4df36112e62afd41f60, regionState=OPEN, openSeqNum=2, regionLocation=94eedbb855cf,46389,1732617006271 2024-11-26T10:30:07,559 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d718d021ae6be4df36112e62afd41f60, server=94eedbb855cf,46389,1732617006271 because future has completed 2024-11-26T10:30:07,562 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-26T10:30:07,562 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d718d021ae6be4df36112e62afd41f60, server=94eedbb855cf,46389,1732617006271 in 173 msec 2024-11-26T10:30:07,564 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-26T10:30:07,564 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d718d021ae6be4df36112e62afd41f60, ASSIGN in 331 msec 2024-11-26T10:30:07,566 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-26T10:30:07,566 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617007566"}]},"ts":"1732617007566"} 2024-11-26T10:30:07,568 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-26T10:30:07,569 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-26T10:30:07,571 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 368 msec 2024-11-26T10:30:07,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:07,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:08,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:08,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:09,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:09,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:10,023 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,023 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,023 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,023 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,023 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,024 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,024 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,024 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,039 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,040 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,040 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,040 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,040 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,040 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,045 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,549 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-26T10:30:10,550 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,550 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,550 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,551 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,551 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,551 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,551 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,551 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,569 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,570 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,570 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,570 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,570 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,571 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,573 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:10,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:10,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:11,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:11,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:12,516 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-26T10:30:12,517 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-26T10:30:12,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:12,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:13,254 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-26T10:30:13,254 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-26T10:30:13,254 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-26T10:30:13,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:13,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:14,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:14,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:15,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:15,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:16,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-26T10:30:16,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-26T10:30:17,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34641 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-26T10:30:17,295 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-11-26T10:30:17,295 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-11-26T10:30:17,298 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-11-26T10:30:17,298 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60.
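The entries that follow show the client-facing side of this test: puts against TestLogRolling-testLogRolling (row0001 onward), a memstore flush of region d718d021ae6be4df36112e62afd41f60, and RegionTooBusyException pushback once the memstore passes its blocking limit. The following is not part of the log; it is a minimal, illustrative Java sketch of that write pattern, assuming the standard HBase 3.x client API and the 'info' column family seen above. The class name, qualifier, payload size, and backoff values are invented for illustration, and the real async client already retries RegionTooBusyException internally, as the AsyncRegionLocatorHelper entries below show; the explicit loop here only makes that behavior visible.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      for (int i = 1; i <= 100; i++) {
        // Row keys follow the row0001, row0002, ... pattern visible in the log.
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
        // 'info' is the column family created by the test; qualifier and value are illustrative.
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), new byte[1024]);
        int attempts = 0;
        while (true) {
          try {
            table.put(put);
            break;
          } catch (RegionTooBusyException e) {
            // The server rejects writes while the region is over its memstore blocking limit
            // ("Over memstore limit=..."); back off and retry instead of failing the run.
            if (++attempts > 10) {
              throw e;
            }
            Thread.sleep(100L * attempts);
          }
        }
      }
    }
  }
}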
2024-11-26T10:30:17,300 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60., hostname=94eedbb855cf,46389,1732617006271, seqNum=2]
2024-11-26T10:30:17,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on d718d021ae6be4df36112e62afd41f60
2024-11-26T10:30:17,311 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d718d021ae6be4df36112e62afd41f60 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-26T10:30:17,329 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/a272649ec6b44114a649d977f3d6edef is 1080, key is row0001/info:/1732617017301/Put/seqid=0
2024-11-26T10:30:17,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741837_1013 (size=12509)
2024-11-26T10:30:17,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741837_1013 (size=12509)
2024-11-26T10:30:17,343 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/a272649ec6b44114a649d977f3d6edef
2024-11-26T10:30:17,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/a272649ec6b44114a649d977f3d6edef as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/a272649ec6b44114a649d977f3d6edef
2024-11-26T10:30:17,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d718d021ae6be4df36112e62afd41f60, server=94eedbb855cf,46389,1732617006271
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-26T10:30:17,357 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/a272649ec6b44114a649d977f3d6edef, entries=7, sequenceid=11, filesize=12.2 K
2024-11-26T10:30:17,358 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for d718d021ae6be4df36112e62afd41f60 in 47ms, sequenceid=11, compaction requested=false
2024-11-26T10:30:17,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d718d021ae6be4df36112e62afd41f60:
2024-11-26T10:30:17,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:33544 deadline: 1732617027353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d718d021ae6be4df36112e62afd41f60, server=94eedbb855cf,46389,1732617006271
2024-11-26T10:30:17,359 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60., hostname=94eedbb855cf,46389,1732617006271, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60., hostname=94eedbb855cf,46389,1732617006271, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d718d021ae6be4df36112e62afd41f60, server=94eedbb855cf,46389,1732617006271
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-26T10:30:17,360 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60., hostname=94eedbb855cf,46389,1732617006271, seqNum=2 is
org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d718d021ae6be4df36112e62afd41f60, server=94eedbb855cf,46389,1732617006271
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-26T10:30:17,360 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60., hostname=94eedbb855cf,46389,1732617006271, seqNum=2 because the exception is null or not the one we care about
2024-11-26T10:30:17,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:17,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:18,756 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-26T10:30:18,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:18,758 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:18,758 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:18,758 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:18,758 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:18,758 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:18,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:18,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:18,783 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:18,783 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:18,784 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:18,784 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:18,784 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:18,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:18,790 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:18,790 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:18,791 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:18,794 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:18,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:18,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:19,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:19,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:20,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:20,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:21,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:21,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:22,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:22,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:23,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:23,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:24,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:24,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:25,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:25,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:26,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:26,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:27,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:27,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d718d021ae6be4df36112e62afd41f60 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-26T10:30:27,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/af4c19bec793478a95e5fe5734b032ad is 1080, key is row0008/info:/1732617017312/Put/seqid=0 2024-11-26T10:30:27,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741838_1014 (size=29761) 2024-11-26T10:30:27,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741838_1014 (size=29761) 2024-11-26T10:30:27,466 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/af4c19bec793478a95e5fe5734b032ad 2024-11-26T10:30:27,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/af4c19bec793478a95e5fe5734b032ad as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/af4c19bec793478a95e5fe5734b032ad 2024-11-26T10:30:27,476 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/af4c19bec793478a95e5fe5734b032ad, entries=23, sequenceid=37, filesize=29.1 K 2024-11-26T10:30:27,477 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for d718d021ae6be4df36112e62afd41f60 in 21ms, sequenceid=37, compaction requested=false 2024-11-26T10:30:27,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d718d021ae6be4df36112e62afd41f60: 2024-11-26T10:30:27,477 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-26T10:30:27,477 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:30:27,477 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/af4c19bec793478a95e5fe5734b032ad because midkey is the same as first or last row 2024-11-26T10:30:27,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:27,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:28,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:28,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:29,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:29,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d718d021ae6be4df36112e62afd41f60 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-26T10:30:29,473 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/caca244352db43049eb4739b9e70b7a7 is 1080, key is row0031/info:/1732617027457/Put/seqid=0 2024-11-26T10:30:29,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741839_1015 (size=12509) 2024-11-26T10:30:29,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741839_1015 (size=12509) 2024-11-26T10:30:29,480 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/caca244352db43049eb4739b9e70b7a7 2024-11-26T10:30:29,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/caca244352db43049eb4739b9e70b7a7 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/caca244352db43049eb4739b9e70b7a7 2024-11-26T10:30:29,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/caca244352db43049eb4739b9e70b7a7, entries=7, sequenceid=47, filesize=12.2 K 2024-11-26T10:30:29,492 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for d718d021ae6be4df36112e62afd41f60 in 22ms, sequenceid=47, compaction requested=true 2024-11-26T10:30:29,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d718d021ae6be4df36112e62afd41f60: 2024-11-26T10:30:29,492 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-26T10:30:29,492 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:30:29,492 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/af4c19bec793478a95e5fe5734b032ad because midkey is the same as first or last row 2024-11-26T10:30:29,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d718d021ae6be4df36112e62afd41f60:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-26T10:30:29,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:30:29,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:29,492 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:30:29,492 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d718d021ae6be4df36112e62afd41f60 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-26T10:30:29,493 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:30:29,494 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1541): d718d021ae6be4df36112e62afd41f60/info is initiating minor compaction (all files) 2024-11-26T10:30:29,494 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d718d021ae6be4df36112e62afd41f60/info in TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. 2024-11-26T10:30:29,494 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/a272649ec6b44114a649d977f3d6edef, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/af4c19bec793478a95e5fe5734b032ad, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/caca244352db43049eb4739b9e70b7a7] into tmpdir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp, totalSize=53.5 K 2024-11-26T10:30:29,494 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting a272649ec6b44114a649d977f3d6edef, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732617017301 2024-11-26T10:30:29,495 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting af4c19bec793478a95e5fe5734b032ad, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732617017312 2024-11-26T10:30:29,495 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting caca244352db43049eb4739b9e70b7a7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732617027457 2024-11-26T10:30:29,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/d3a6b763f7b7422891a3ae6c4a7d54d3 is 1080, key is row0038/info:/1732617029469/Put/seqid=0 
2024-11-26T10:30:29,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741840_1016 (size=21141) 2024-11-26T10:30:29,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741840_1016 (size=21141) 2024-11-26T10:30:29,505 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/d3a6b763f7b7422891a3ae6c4a7d54d3 2024-11-26T10:30:29,510 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d718d021ae6be4df36112e62afd41f60#info#compaction#59 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:30:29,510 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/458429c22b824873b402c4af3d528955 is 1080, key is row0001/info:/1732617017301/Put/seqid=0 2024-11-26T10:30:29,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/d3a6b763f7b7422891a3ae6c4a7d54d3 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/d3a6b763f7b7422891a3ae6c4a7d54d3 2024-11-26T10:30:29,516 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/d3a6b763f7b7422891a3ae6c4a7d54d3, entries=15, sequenceid=65, filesize=20.6 K 2024-11-26T10:30:29,517 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for d718d021ae6be4df36112e62afd41f60 in 25ms, sequenceid=65, compaction requested=false 2024-11-26T10:30:29,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d718d021ae6be4df36112e62afd41f60: 2024-11-26T10:30:29,518 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.1 K, sizeToCheck=16.0 K 2024-11-26T10:30:29,518 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:30:29,518 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/af4c19bec793478a95e5fe5734b032ad because midkey is the same as first or last row 2024-11-26T10:30:29,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741841_1017 (size=44978) 2024-11-26T10:30:29,521 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741841_1017 (size=44978) 2024-11-26T10:30:29,527 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/458429c22b824873b402c4af3d528955 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/458429c22b824873b402c4af3d528955 2024-11-26T10:30:29,533 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d718d021ae6be4df36112e62afd41f60/info of d718d021ae6be4df36112e62afd41f60 into 458429c22b824873b402c4af3d528955(size=43.9 K), total size for store is 64.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:30:29,533 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d718d021ae6be4df36112e62afd41f60: 2024-11-26T10:30:29,533 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60., storeName=d718d021ae6be4df36112e62afd41f60/info, priority=13, startTime=1732617029492; duration=0sec 2024-11-26T10:30:29,533 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-26T10:30:29,533 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:30:29,533 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/458429c22b824873b402c4af3d528955 because midkey is the same as first or last row 2024-11-26T10:30:29,533 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-26T10:30:29,533 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:30:29,533 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/458429c22b824873b402c4af3d528955 because midkey is the same as first or last row 2024-11-26T10:30:29,533 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-26T10:30:29,533 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:30:29,533 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/458429c22b824873b402c4af3d528955 because midkey is the same as first or last row 2024-11-26T10:30:29,533 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:30:29,533 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d718d021ae6be4df36112e62afd41f60:info 2024-11-26T10:30:29,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:29,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:30,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:30,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:31,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:31,517 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d718d021ae6be4df36112e62afd41f60 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-26T10:30:31,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/806997c212794f468bec5f228ca0ad25 is 1080, key is row0053/info:/1732617029493/Put/seqid=0 2024-11-26T10:30:31,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741842_1018 (size=18987) 2024-11-26T10:30:31,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741842_1018 (size=18987) 2024-11-26T10:30:31,527 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/806997c212794f468bec5f228ca0ad25 2024-11-26T10:30:31,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/806997c212794f468bec5f228ca0ad25 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/806997c212794f468bec5f228ca0ad25 2024-11-26T10:30:31,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/806997c212794f468bec5f228ca0ad25, entries=13, sequenceid=82, filesize=18.5 K 2024-11-26T10:30:31,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for d718d021ae6be4df36112e62afd41f60 in 23ms, sequenceid=82, compaction requested=true 2024-11-26T10:30:31,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d718d021ae6be4df36112e62afd41f60: 2024-11-26T10:30:31,540 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-26T10:30:31,540 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:30:31,540 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/458429c22b824873b402c4af3d528955 because midkey is the same as first or last row 2024-11-26T10:30:31,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d718d021ae6be4df36112e62afd41f60:info, priority=-2147483648, current under 
compaction store size is 1 2024-11-26T10:30:31,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:30:31,540 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:30:31,542 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:30:31,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:31,542 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1541): d718d021ae6be4df36112e62afd41f60/info is initiating minor compaction (all files) 2024-11-26T10:30:31,542 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d718d021ae6be4df36112e62afd41f60 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-26T10:30:31,542 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d718d021ae6be4df36112e62afd41f60/info in TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. 2024-11-26T10:30:31,542 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/458429c22b824873b402c4af3d528955, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/d3a6b763f7b7422891a3ae6c4a7d54d3, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/806997c212794f468bec5f228ca0ad25] into tmpdir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp, totalSize=83.1 K 2024-11-26T10:30:31,542 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting 458429c22b824873b402c4af3d528955, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732617017301 2024-11-26T10:30:31,543 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting d3a6b763f7b7422891a3ae6c4a7d54d3, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=65, earliestPutTs=1732617029469 2024-11-26T10:30:31,543 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting 806997c212794f468bec5f228ca0ad25, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732617029493 2024-11-26T10:30:31,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/47d42f898b51434b88fdd084c2a0f712 is 1080, key is 
row0066/info:/1732617031518/Put/seqid=0 2024-11-26T10:30:31,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741843_1019 (size=20064) 2024-11-26T10:30:31,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741843_1019 (size=20064) 2024-11-26T10:30:31,553 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/47d42f898b51434b88fdd084c2a0f712 2024-11-26T10:30:31,557 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d718d021ae6be4df36112e62afd41f60#info#compaction#62 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:30:31,557 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/2212a186639b4a5fb403255fc175689b is 1080, key is row0001/info:/1732617017301/Put/seqid=0 2024-11-26T10:30:31,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/47d42f898b51434b88fdd084c2a0f712 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/47d42f898b51434b88fdd084c2a0f712 2024-11-26T10:30:31,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741844_1020 (size=75378) 2024-11-26T10:30:31,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741844_1020 (size=75378) 2024-11-26T10:30:31,564 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/47d42f898b51434b88fdd084c2a0f712, entries=14, sequenceid=99, filesize=19.6 K 2024-11-26T10:30:31,565 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for d718d021ae6be4df36112e62afd41f60 in 23ms, sequenceid=99, compaction requested=false 2024-11-26T10:30:31,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d718d021ae6be4df36112e62afd41f60: 2024-11-26T10:30:31,565 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.7 K, sizeToCheck=16.0 K 2024-11-26T10:30:31,565 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:30:31,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on 
d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:31,565 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/458429c22b824873b402c4af3d528955 because midkey is the same as first or last row 2024-11-26T10:30:31,565 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d718d021ae6be4df36112e62afd41f60 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-26T10:30:31,570 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/2212a186639b4a5fb403255fc175689b as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/2212a186639b4a5fb403255fc175689b 2024-11-26T10:30:31,570 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/3fe02274e3fd4105a4fd09c6695a1201 is 1080, key is row0080/info:/1732617031543/Put/seqid=0 2024-11-26T10:30:31,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741845_1021 (size=17894) 2024-11-26T10:30:31,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741845_1021 (size=17894) 2024-11-26T10:30:31,576 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d718d021ae6be4df36112e62afd41f60/info of d718d021ae6be4df36112e62afd41f60 into 2212a186639b4a5fb403255fc175689b(size=73.6 K), total size for store is 93.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:30:31,576 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/3fe02274e3fd4105a4fd09c6695a1201 2024-11-26T10:30:31,576 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d718d021ae6be4df36112e62afd41f60: 2024-11-26T10:30:31,576 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60., storeName=d718d021ae6be4df36112e62afd41f60/info, priority=13, startTime=1732617031540; duration=0sec 2024-11-26T10:30:31,576 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-26T10:30:31,576 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:30:31,576 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-26T10:30:31,576 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:30:31,576 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-26T10:30:31,576 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:30:31,577 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:30:31,577 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:30:31,577 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d718d021ae6be4df36112e62afd41f60:info 2024-11-26T10:30:31,579 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34641 {}] assignment.AssignmentManager(1363): Split request from 94eedbb855cf,46389,1732617006271, parent={ENCODED => d718d021ae6be4df36112e62afd41f60, NAME => 'TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-26T10:30:31,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/3fe02274e3fd4105a4fd09c6695a1201 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/3fe02274e3fd4105a4fd09c6695a1201 
2024-11-26T10:30:31,585 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34641 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=94eedbb855cf,46389,1732617006271 2024-11-26T10:30:31,586 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/3fe02274e3fd4105a4fd09c6695a1201, entries=12, sequenceid=114, filesize=17.5 K 2024-11-26T10:30:31,586 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=5.25 KB/5380 for d718d021ae6be4df36112e62afd41f60 in 21ms, sequenceid=114, compaction requested=true 2024-11-26T10:30:31,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d718d021ae6be4df36112e62afd41f60: 2024-11-26T10:30:31,587 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=110.7 K, sizeToCheck=16.0 K 2024-11-26T10:30:31,587 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:30:31,587 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=110.7 K, sizeToCheck=16.0 K 2024-11-26T10:30:31,587 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:30:31,587 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=110.7 K, sizeToCheck=16.0 K 2024-11-26T10:30:31,587 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-26T10:30:31,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1 2024-11-26T10:30:31,590 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34641 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d718d021ae6be4df36112e62afd41f60, daughterA=d5d2b6bf03ac3f8189ad38bcc867036a, daughterB=49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:31,591 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d718d021ae6be4df36112e62afd41f60, daughterA=d5d2b6bf03ac3f8189ad38bcc867036a, daughterB=49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:31,591 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d718d021ae6be4df36112e62afd41f60, daughterA=d5d2b6bf03ac3f8189ad38bcc867036a, daughterB=49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:31,591 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d718d021ae6be4df36112e62afd41f60, 
daughterA=d5d2b6bf03ac3f8189ad38bcc867036a, daughterB=49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:31,592 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34641 {}] assignment.AssignmentManager(1363): Split request from 94eedbb855cf,46389,1732617006271, parent={ENCODED => d718d021ae6be4df36112e62afd41f60, NAME => 'TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-26T10:30:31,593 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34641 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=SPLITTING, location=94eedbb855cf,46389,1732617006271 2024-11-26T10:30:31,594 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34641 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d718d021ae6be4df36112e62afd41f60, daughterA=6e5f8ab02f1dc31464e6c79f25a71c1d, daughterB=bec1236b89f74d4a95b6d518f5d11e29 2024-11-26T10:30:31,594 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(842): Waiting on xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d718d021ae6be4df36112e62afd41f60, daughterA=6e5f8ab02f1dc31464e6c79f25a71c1d, daughterB=bec1236b89f74d4a95b6d518f5d11e29 held by pid=7 2024-11-26T10:30:31,598 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d718d021ae6be4df36112e62afd41f60, UNASSIGN}] 2024-11-26T10:30:31,601 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(162): LOCK_EVENT_WAIT SchemaLocking[serverLocks={},namespaceLocks={hbase=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},tableLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},regionLocks={},peerLocks={},metaLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},globalLocks={}] 2024-11-26T10:30:31,601 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d718d021ae6be4df36112e62afd41f60, UNASSIGN 2024-11-26T10:30:31,601 DEBUG [PEWorker-3 {}] procedure2.ProcedureExecutor(1511): LOCK_EVENT_WAIT pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d718d021ae6be4df36112e62afd41f60, daughterA=6e5f8ab02f1dc31464e6c79f25a71c1d, daughterB=bec1236b89f74d4a95b6d518f5d11e29 2024-11-26T10:30:31,603 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=d718d021ae6be4df36112e62afd41f60, regionState=CLOSING, regionLocation=94eedbb855cf,46389,1732617006271 2024-11-26T10:30:31,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d718d021ae6be4df36112e62afd41f60, UNASSIGN because future has completed 2024-11-26T10:30:31,605 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: 
isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-26T10:30:31,605 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure d718d021ae6be4df36112e62afd41f60, server=94eedbb855cf,46389,1732617006271}] 2024-11-26T10:30:31,762 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(122): Close d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:31,762 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-26T10:30:31,763 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1722): Closing d718d021ae6be4df36112e62afd41f60, disabling compactions & flushes 2024-11-26T10:30:31,763 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. 2024-11-26T10:30:31,763 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. 2024-11-26T10:30:31,763 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. after waiting 0 ms 2024-11-26T10:30:31,763 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. 
2024-11-26T10:30:31,763 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(2902): Flushing d718d021ae6be4df36112e62afd41f60 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-11-26T10:30:31,767 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/736116411f334238ac38fc3bcba24bda is 1080, key is row0092/info:/1732617031566/Put/seqid=0 2024-11-26T10:30:31,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741846_1022 (size=10347) 2024-11-26T10:30:31,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741846_1022 (size=10347) 2024-11-26T10:30:31,772 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/736116411f334238ac38fc3bcba24bda 2024-11-26T10:30:31,777 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/.tmp/info/736116411f334238ac38fc3bcba24bda as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/736116411f334238ac38fc3bcba24bda 2024-11-26T10:30:31,781 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/736116411f334238ac38fc3bcba24bda, entries=5, sequenceid=123, filesize=10.1 K 2024-11-26T10:30:31,782 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for d718d021ae6be4df36112e62afd41f60 in 19ms, sequenceid=123, compaction requested=true 2024-11-26T10:30:31,783 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/a272649ec6b44114a649d977f3d6edef, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/af4c19bec793478a95e5fe5734b032ad, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/458429c22b824873b402c4af3d528955, 
hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/caca244352db43049eb4739b9e70b7a7, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/d3a6b763f7b7422891a3ae6c4a7d54d3, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/806997c212794f468bec5f228ca0ad25] to archive 2024-11-26T10:30:31,783 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-26T10:30:31,785 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/a272649ec6b44114a649d977f3d6edef to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/a272649ec6b44114a649d977f3d6edef 2024-11-26T10:30:31,786 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/af4c19bec793478a95e5fe5734b032ad to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/af4c19bec793478a95e5fe5734b032ad 2024-11-26T10:30:31,787 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/458429c22b824873b402c4af3d528955 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/458429c22b824873b402c4af3d528955 2024-11-26T10:30:31,789 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/caca244352db43049eb4739b9e70b7a7 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/caca244352db43049eb4739b9e70b7a7 2024-11-26T10:30:31,790 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/d3a6b763f7b7422891a3ae6c4a7d54d3 to 
hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/d3a6b763f7b7422891a3ae6c4a7d54d3 2024-11-26T10:30:31,791 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/806997c212794f468bec5f228ca0ad25 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/806997c212794f468bec5f228ca0ad25 2024-11-26T10:30:31,797 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-26T10:30:31,798 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. 2024-11-26T10:30:31,798 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1676): Region close journal for d718d021ae6be4df36112e62afd41f60: Waiting for close lock at 1732617031763Running coprocessor pre-close hooks at 1732617031763Disabling compacts and flushes for region at 1732617031763Disabling writes for close at 1732617031763Obtaining lock to block concurrent updates at 1732617031763Preparing flush snapshotting stores in d718d021ae6be4df36112e62afd41f60 at 1732617031763Finished memstore snapshotting TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60., syncing WAL and waiting on mvcc, flushsize=dataSize=5380, getHeapSize=6000, getOffHeapSize=0, getCellsCount=5 at 1732617031763Flushing stores of TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. 
at 1732617031764 (+1 ms)Flushing d718d021ae6be4df36112e62afd41f60/info: creating writer at 1732617031764Flushing d718d021ae6be4df36112e62afd41f60/info: appending metadata at 1732617031767 (+3 ms)Flushing d718d021ae6be4df36112e62afd41f60/info: closing flushed file at 1732617031767Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78cfd5f6: reopening flushed file at 1732617031776 (+9 ms)Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for d718d021ae6be4df36112e62afd41f60 in 19ms, sequenceid=123, compaction requested=true at 1732617031782 (+6 ms)Writing region close event to WAL at 1732617031794 (+12 ms)Running coprocessor post-close hooks at 1732617031798 (+4 ms)Closed at 1732617031798 2024-11-26T10:30:31,800 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(157): Closed d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:31,801 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=d718d021ae6be4df36112e62afd41f60, regionState=CLOSED 2024-11-26T10:30:31,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure d718d021ae6be4df36112e62afd41f60, server=94eedbb855cf,46389,1732617006271 because future has completed 2024-11-26T10:30:31,806 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-26T10:30:31,806 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; CloseRegionProcedure d718d021ae6be4df36112e62afd41f60, server=94eedbb855cf,46389,1732617006271 in 199 msec 2024-11-26T10:30:31,809 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-26T10:30:31,809 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d718d021ae6be4df36112e62afd41f60, UNASSIGN in 208 msec 2024-11-26T10:30:31,817 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:31,821 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=d718d021ae6be4df36112e62afd41f60, threads=4 2024-11-26T10:30:31,823 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/47d42f898b51434b88fdd084c2a0f712 for region: d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:31,823 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/3fe02274e3fd4105a4fd09c6695a1201 for region: d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:31,823 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/2212a186639b4a5fb403255fc175689b for region: d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:31,823 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/736116411f334238ac38fc3bcba24bda for region: d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:31,833 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/3fe02274e3fd4105a4fd09c6695a1201, top=true 2024-11-26T10:30:31,836 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/47d42f898b51434b88fdd084c2a0f712, top=true 2024-11-26T10:30:31,836 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/736116411f334238ac38fc3bcba24bda, top=true 2024-11-26T10:30:31,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741847_1023 (size=27) 2024-11-26T10:30:31,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741847_1023 (size=27) 2024-11-26T10:30:31,845 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-3fe02274e3fd4105a4fd09c6695a1201 for child: 49b94c9ac03f0f74a86a3751fc04740a, parent: d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:31,846 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/3fe02274e3fd4105a4fd09c6695a1201 for region: d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:31,847 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-736116411f334238ac38fc3bcba24bda for child: 49b94c9ac03f0f74a86a3751fc04740a, parent: d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:31,847 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/736116411f334238ac38fc3bcba24bda for region: d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:31,848 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-47d42f898b51434b88fdd084c2a0f712 for child: 49b94c9ac03f0f74a86a3751fc04740a, parent: d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:31,848 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/47d42f898b51434b88fdd084c2a0f712 for region: d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:31,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741848_1024 (size=27) 2024-11-26T10:30:31,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741848_1024 (size=27) 2024-11-26T10:30:31,856 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/2212a186639b4a5fb403255fc175689b for region: d718d021ae6be4df36112e62afd41f60 2024-11-26T10:30:31,858 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region d718d021ae6be4df36112e62afd41f60 Daughter A: [hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d5d2b6bf03ac3f8189ad38bcc867036a/info/2212a186639b4a5fb403255fc175689b.d718d021ae6be4df36112e62afd41f60] storefiles, Daughter B: [hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/2212a186639b4a5fb403255fc175689b.d718d021ae6be4df36112e62afd41f60, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-3fe02274e3fd4105a4fd09c6695a1201, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-47d42f898b51434b88fdd084c2a0f712, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-736116411f334238ac38fc3bcba24bda] storefiles. 
2024-11-26T10:30:31,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741849_1025 (size=71) 2024-11-26T10:30:31,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741849_1025 (size=71) 2024-11-26T10:30:31,868 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:31,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741850_1026 (size=71) 2024-11-26T10:30:31,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741850_1026 (size=71) 2024-11-26T10:30:31,881 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:31,890 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d5d2b6bf03ac3f8189ad38bcc867036a/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-26T10:30:31,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:31,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:31,892 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-26T10:30:31,895 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732617031894"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732617031894"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732617031894"}]},"ts":"1732617031894"} 2024-11-26T10:30:31,895 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732617031894"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732617031894"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732617031894"}]},"ts":"1732617031894"} 2024-11-26T10:30:31,895 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732617031894"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732617031894"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732617031894"}]},"ts":"1732617031894"} 2024-11-26T10:30:31,914 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d5d2b6bf03ac3f8189ad38bcc867036a, ASSIGN}, {pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49b94c9ac03f0f74a86a3751fc04740a, ASSIGN}] 2024-11-26T10:30:31,915 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49b94c9ac03f0f74a86a3751fc04740a, ASSIGN 2024-11-26T10:30:31,915 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d5d2b6bf03ac3f8189ad38bcc867036a, ASSIGN 2024-11-26T10:30:31,916 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d5d2b6bf03ac3f8189ad38bcc867036a, ASSIGN; state=SPLITTING_NEW, location=94eedbb855cf,46389,1732617006271; forceNewPlan=false, retain=false 2024-11-26T10:30:31,916 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49b94c9ac03f0f74a86a3751fc04740a, ASSIGN; state=SPLITTING_NEW, 
location=94eedbb855cf,46389,1732617006271; forceNewPlan=false, retain=false 2024-11-26T10:30:32,067 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=d5d2b6bf03ac3f8189ad38bcc867036a, regionState=OPENING, regionLocation=94eedbb855cf,46389,1732617006271 2024-11-26T10:30:32,067 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=49b94c9ac03f0f74a86a3751fc04740a, regionState=OPENING, regionLocation=94eedbb855cf,46389,1732617006271 2024-11-26T10:30:32,069 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49b94c9ac03f0f74a86a3751fc04740a, ASSIGN because future has completed 2024-11-26T10:30:32,070 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure 49b94c9ac03f0f74a86a3751fc04740a, server=94eedbb855cf,46389,1732617006271}] 2024-11-26T10:30:32,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d5d2b6bf03ac3f8189ad38bcc867036a, ASSIGN because future has completed 2024-11-26T10:30:32,071 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure d5d2b6bf03ac3f8189ad38bcc867036a, server=94eedbb855cf,46389,1732617006271}] 2024-11-26T10:30:32,224 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a. 
2024-11-26T10:30:32,224 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7752): Opening region: {ENCODED => d5d2b6bf03ac3f8189ad38bcc867036a, NAME => 'TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-26T10:30:32,225 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling d5d2b6bf03ac3f8189ad38bcc867036a 2024-11-26T10:30:32,225 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:30:32,225 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7794): checking encryption for d5d2b6bf03ac3f8189ad38bcc867036a 2024-11-26T10:30:32,225 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7797): checking classloading for d5d2b6bf03ac3f8189ad38bcc867036a 2024-11-26T10:30:32,226 INFO [StoreOpener-d5d2b6bf03ac3f8189ad38bcc867036a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d5d2b6bf03ac3f8189ad38bcc867036a 2024-11-26T10:30:32,227 INFO [StoreOpener-d5d2b6bf03ac3f8189ad38bcc867036a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d5d2b6bf03ac3f8189ad38bcc867036a columnFamilyName info 2024-11-26T10:30:32,227 DEBUG [StoreOpener-d5d2b6bf03ac3f8189ad38bcc867036a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:32,236 DEBUG [StoreOpener-d5d2b6bf03ac3f8189ad38bcc867036a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d5d2b6bf03ac3f8189ad38bcc867036a/info/2212a186639b4a5fb403255fc175689b.d718d021ae6be4df36112e62afd41f60->hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/2212a186639b4a5fb403255fc175689b-bottom 2024-11-26T10:30:32,237 INFO [StoreOpener-d5d2b6bf03ac3f8189ad38bcc867036a-1 {}] regionserver.HStore(327): Store=d5d2b6bf03ac3f8189ad38bcc867036a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:30:32,237 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1038): replaying wal for d5d2b6bf03ac3f8189ad38bcc867036a 2024-11-26T10:30:32,238 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d5d2b6bf03ac3f8189ad38bcc867036a 2024-11-26T10:30:32,239 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d5d2b6bf03ac3f8189ad38bcc867036a 2024-11-26T10:30:32,239 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1048): stopping wal replay for d5d2b6bf03ac3f8189ad38bcc867036a 2024-11-26T10:30:32,239 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1060): Cleaning up temporary data for d5d2b6bf03ac3f8189ad38bcc867036a 2024-11-26T10:30:32,240 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1093): writing seq id for d5d2b6bf03ac3f8189ad38bcc867036a 2024-11-26T10:30:32,241 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1114): Opened d5d2b6bf03ac3f8189ad38bcc867036a; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=782670, jitterRate=-0.004783883690834045}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-26T10:30:32,241 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d5d2b6bf03ac3f8189ad38bcc867036a 2024-11-26T10:30:32,242 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1006): Region open journal for d5d2b6bf03ac3f8189ad38bcc867036a: Running coprocessor pre-open hook at 1732617032225Writing region info on filesystem at 1732617032225Initializing all the Stores at 1732617032226 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732617032226Cleaning up temporary data from old regions at 1732617032239 (+13 ms)Running coprocessor post-open hooks at 1732617032241 (+2 ms)Region opened successfully at 1732617032242 (+1 ms) 2024-11-26T10:30:32,243 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a., pid=14, masterSystemTime=1732617032221 2024-11-26T10:30:32,243 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(403): Add compact mark for 
store d5d2b6bf03ac3f8189ad38bcc867036a:info, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:30:32,243 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:30:32,243 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-26T10:30:32,243 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a. 2024-11-26T10:30:32,243 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1541): d5d2b6bf03ac3f8189ad38bcc867036a/info is initiating minor compaction (all files) 2024-11-26T10:30:32,243 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d5d2b6bf03ac3f8189ad38bcc867036a/info in TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a. 2024-11-26T10:30:32,244 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d5d2b6bf03ac3f8189ad38bcc867036a/info/2212a186639b4a5fb403255fc175689b.d718d021ae6be4df36112e62afd41f60->hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/2212a186639b4a5fb403255fc175689b-bottom] into tmpdir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d5d2b6bf03ac3f8189ad38bcc867036a/.tmp, totalSize=73.6 K 2024-11-26T10:30:32,244 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2212a186639b4a5fb403255fc175689b.d718d021ae6be4df36112e62afd41f60, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732617017301 2024-11-26T10:30:32,245 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a. 2024-11-26T10:30:32,245 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a. 2024-11-26T10:30:32,245 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a. 
2024-11-26T10:30:32,245 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 49b94c9ac03f0f74a86a3751fc04740a, NAME => 'TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-26T10:30:32,245 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:32,245 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:30:32,246 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:32,246 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:32,246 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=d5d2b6bf03ac3f8189ad38bcc867036a, regionState=OPEN, openSeqNum=127, regionLocation=94eedbb855cf,46389,1732617006271 2024-11-26T10:30:32,247 INFO [StoreOpener-49b94c9ac03f0f74a86a3751fc04740a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:32,247 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-26T10:30:32,248 INFO [StoreOpener-49b94c9ac03f0f74a86a3751fc04740a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 49b94c9ac03f0f74a86a3751fc04740a columnFamilyName info 2024-11-26T10:30:32,248 DEBUG [StoreOpener-49b94c9ac03f0f74a86a3751fc04740a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:30:32,248 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-26T10:30:32,248 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-26T10:30:32,248 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure d5d2b6bf03ac3f8189ad38bcc867036a, server=94eedbb855cf,46389,1732617006271 because future has completed 2024-11-26T10:30:32,251 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=11 2024-11-26T10:30:32,251 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure d5d2b6bf03ac3f8189ad38bcc867036a, server=94eedbb855cf,46389,1732617006271 in 178 msec 2024-11-26T10:30:32,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d5d2b6bf03ac3f8189ad38bcc867036a, ASSIGN in 337 msec 2024-11-26T10:30:32,265 DEBUG [StoreOpener-49b94c9ac03f0f74a86a3751fc04740a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/2212a186639b4a5fb403255fc175689b.d718d021ae6be4df36112e62afd41f60->hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/2212a186639b4a5fb403255fc175689b-top 2024-11-26T10:30:32,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/.tmp/info/b6a44d8cf3bd4fe0b4730b3af1008abb is 193, key is TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a./info:regioninfo/1732617032067/Put/seqid=0 2024-11-26T10:30:32,270 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d5d2b6bf03ac3f8189ad38bcc867036a#info#compaction#65 average throughput is 15.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:30:32,271 DEBUG [StoreOpener-49b94c9ac03f0f74a86a3751fc04740a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-3fe02274e3fd4105a4fd09c6695a1201 2024-11-26T10:30:32,271 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d5d2b6bf03ac3f8189ad38bcc867036a/.tmp/info/8a18d3a452144d28b5bd4ad91927dcc2 is 1080, key is row0001/info:/1732617017301/Put/seqid=0 2024-11-26T10:30:32,276 DEBUG [StoreOpener-49b94c9ac03f0f74a86a3751fc04740a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-47d42f898b51434b88fdd084c2a0f712 2024-11-26T10:30:32,280 DEBUG [StoreOpener-49b94c9ac03f0f74a86a3751fc04740a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-736116411f334238ac38fc3bcba24bda 2024-11-26T10:30:32,280 INFO [StoreOpener-49b94c9ac03f0f74a86a3751fc04740a-1 {}] regionserver.HStore(327): Store=49b94c9ac03f0f74a86a3751fc04740a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:30:32,280 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:32,281 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:32,282 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:32,282 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:32,282 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:32,284 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:32,285 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 49b94c9ac03f0f74a86a3751fc04740a; next 
sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=843467, jitterRate=0.07252414524555206}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-26T10:30:32,285 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:32,285 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 49b94c9ac03f0f74a86a3751fc04740a: Running coprocessor pre-open hook at 1732617032246Writing region info on filesystem at 1732617032246Initializing all the Stores at 1732617032246Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732617032246Cleaning up temporary data from old regions at 1732617032282 (+36 ms)Running coprocessor post-open hooks at 1732617032285 (+3 ms)Region opened successfully at 1732617032285 2024-11-26T10:30:32,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741851_1027 (size=9847) 2024-11-26T10:30:32,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741851_1027 (size=9847) 2024-11-26T10:30:32,286 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., pid=13, masterSystemTime=1732617032221 2024-11-26T10:30:32,287 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 49b94c9ac03f0f74a86a3751fc04740a:info, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:30:32,287 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:30:32,287 DEBUG [RS:0;94eedbb855cf:46389-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:30:32,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/.tmp/info/b6a44d8cf3bd4fe0b4730b3af1008abb 2024-11-26T10:30:32,288 INFO [RS:0;94eedbb855cf:46389-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a. 
2024-11-26T10:30:32,288 DEBUG [RS:0;94eedbb855cf:46389-longCompactions-0 {}] regionserver.HStore(1541): 49b94c9ac03f0f74a86a3751fc04740a/info is initiating minor compaction (all files) 2024-11-26T10:30:32,289 INFO [RS:0;94eedbb855cf:46389-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49b94c9ac03f0f74a86a3751fc04740a/info in TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a. 2024-11-26T10:30:32,289 INFO [RS:0;94eedbb855cf:46389-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/2212a186639b4a5fb403255fc175689b.d718d021ae6be4df36112e62afd41f60->hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/2212a186639b4a5fb403255fc175689b-top, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-47d42f898b51434b88fdd084c2a0f712, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-3fe02274e3fd4105a4fd09c6695a1201, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-736116411f334238ac38fc3bcba24bda] into tmpdir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp, totalSize=120.8 K 2024-11-26T10:30:32,289 DEBUG [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a. 2024-11-26T10:30:32,289 INFO [RS_OPEN_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a. 
2024-11-26T10:30:32,290 DEBUG [RS:0;94eedbb855cf:46389-longCompactions-0 {}] compactions.Compactor(225): Compacting 2212a186639b4a5fb403255fc175689b.d718d021ae6be4df36112e62afd41f60, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1732617017301 2024-11-26T10:30:32,290 DEBUG [RS:0;94eedbb855cf:46389-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-47d42f898b51434b88fdd084c2a0f712, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1732617031518 2024-11-26T10:30:32,290 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=49b94c9ac03f0f74a86a3751fc04740a, regionState=OPEN, openSeqNum=127, regionLocation=94eedbb855cf,46389,1732617006271 2024-11-26T10:30:32,292 DEBUG [RS:0;94eedbb855cf:46389-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-3fe02274e3fd4105a4fd09c6695a1201, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732617031543 2024-11-26T10:30:32,292 DEBUG [RS:0;94eedbb855cf:46389-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-736116411f334238ac38fc3bcba24bda, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732617031566 2024-11-26T10:30:32,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure 49b94c9ac03f0f74a86a3751fc04740a, server=94eedbb855cf,46389,1732617006271 because future has completed 2024-11-26T10:30:32,299 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-11-26T10:30:32,299 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; OpenRegionProcedure 49b94c9ac03f0f74a86a3751fc04740a, server=94eedbb855cf,46389,1732617006271 in 226 msec 2024-11-26T10:30:32,306 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=7 2024-11-26T10:30:32,306 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49b94c9ac03f0f74a86a3751fc04740a, ASSIGN in 385 msec 2024-11-26T10:30:32,308 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d718d021ae6be4df36112e62afd41f60, daughterA=d5d2b6bf03ac3f8189ad38bcc867036a, daughterB=49b94c9ac03f0f74a86a3751fc04740a in 721 msec 2024-11-26T10:30:32,308 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d718d021ae6be4df36112e62afd41f60, daughterA=6e5f8ab02f1dc31464e6c79f25a71c1d, daughterB=bec1236b89f74d4a95b6d518f5d11e29 2024-11-26T10:30:32,308 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d718d021ae6be4df36112e62afd41f60, 
daughterA=6e5f8ab02f1dc31464e6c79f25a71c1d, daughterB=bec1236b89f74d4a95b6d518f5d11e29 2024-11-26T10:30:32,308 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d718d021ae6be4df36112e62afd41f60, daughterA=6e5f8ab02f1dc31464e6c79f25a71c1d, daughterB=bec1236b89f74d4a95b6d518f5d11e29 2024-11-26T10:30:32,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741852_1028 (size=70862) 2024-11-26T10:30:32,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741852_1028 (size=70862) 2024-11-26T10:30:32,310 INFO [PEWorker-2 {}] assignment.SplitTableRegionProcedure(534): Split of {ENCODED => d718d021ae6be4df36112e62afd41f60, NAME => 'TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60.', STARTKEY => '', ENDKEY => ''} skipped; state is already SPLIT 2024-11-26T10:30:32,312 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d718d021ae6be4df36112e62afd41f60, daughterA=6e5f8ab02f1dc31464e6c79f25a71c1d, daughterB=bec1236b89f74d4a95b6d518f5d11e29 in 717 msec 2024-11-26T10:30:32,316 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/.tmp/ns/af904d3e000d4d21a330ec84fde82835 is 43, key is default/ns:d/1732617007083/Put/seqid=0 2024-11-26T10:30:32,316 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d5d2b6bf03ac3f8189ad38bcc867036a/.tmp/info/8a18d3a452144d28b5bd4ad91927dcc2 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d5d2b6bf03ac3f8189ad38bcc867036a/info/8a18d3a452144d28b5bd4ad91927dcc2 2024-11-26T10:30:32,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741853_1029 (size=5153) 2024-11-26T10:30:32,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741853_1029 (size=5153) 2024-11-26T10:30:32,328 INFO [RS:0;94eedbb855cf:46389-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49b94c9ac03f0f74a86a3751fc04740a#info#compaction#68 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:30:32,328 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in d5d2b6bf03ac3f8189ad38bcc867036a/info of d5d2b6bf03ac3f8189ad38bcc867036a into 8a18d3a452144d28b5bd4ad91927dcc2(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:30:32,328 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d5d2b6bf03ac3f8189ad38bcc867036a: 2024-11-26T10:30:32,328 DEBUG [RS:0;94eedbb855cf:46389-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/75145a24fbd04c28987130a694f4fdaf is 1080, key is row0062/info:/1732617029510/Put/seqid=0 2024-11-26T10:30:32,328 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/.tmp/ns/af904d3e000d4d21a330ec84fde82835 2024-11-26T10:30:32,328 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a., storeName=d5d2b6bf03ac3f8189ad38bcc867036a/info, priority=15, startTime=1732617032243; duration=0sec 2024-11-26T10:30:32,328 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:30:32,328 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d5d2b6bf03ac3f8189ad38bcc867036a:info 2024-11-26T10:30:32,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741854_1030 (size=43081) 2024-11-26T10:30:32,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741854_1030 (size=43081) 2024-11-26T10:30:32,340 DEBUG [RS:0;94eedbb855cf:46389-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/75145a24fbd04c28987130a694f4fdaf as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/75145a24fbd04c28987130a694f4fdaf 2024-11-26T10:30:32,346 INFO [RS:0;94eedbb855cf:46389-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 49b94c9ac03f0f74a86a3751fc04740a/info of 49b94c9ac03f0f74a86a3751fc04740a into 75145a24fbd04c28987130a694f4fdaf(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:30:32,346 DEBUG [RS:0;94eedbb855cf:46389-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:30:32,346 INFO [RS:0;94eedbb855cf:46389-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., storeName=49b94c9ac03f0f74a86a3751fc04740a/info, priority=12, startTime=1732617032287; duration=0sec 2024-11-26T10:30:32,346 DEBUG [RS:0;94eedbb855cf:46389-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:30:32,346 DEBUG [RS:0;94eedbb855cf:46389-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49b94c9ac03f0f74a86a3751fc04740a:info 2024-11-26T10:30:32,355 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/.tmp/table/1afe0e1ff62b4109ab6be60f9267a570 is 65, key is TestLogRolling-testLogRolling/table:state/1732617007566/Put/seqid=0 2024-11-26T10:30:32,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741855_1031 (size=5340) 2024-11-26T10:30:32,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741855_1031 (size=5340) 2024-11-26T10:30:32,360 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/.tmp/table/1afe0e1ff62b4109ab6be60f9267a570 2024-11-26T10:30:32,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/.tmp/info/b6a44d8cf3bd4fe0b4730b3af1008abb as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/info/b6a44d8cf3bd4fe0b4730b3af1008abb 2024-11-26T10:30:32,369 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/info/b6a44d8cf3bd4fe0b4730b3af1008abb, entries=30, sequenceid=17, filesize=9.6 K 2024-11-26T10:30:32,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/.tmp/ns/af904d3e000d4d21a330ec84fde82835 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/ns/af904d3e000d4d21a330ec84fde82835 2024-11-26T10:30:32,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/ns/af904d3e000d4d21a330ec84fde82835, entries=2, sequenceid=17, filesize=5.0 K 2024-11-26T10:30:32,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/.tmp/table/1afe0e1ff62b4109ab6be60f9267a570 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/table/1afe0e1ff62b4109ab6be60f9267a570 2024-11-26T10:30:32,379 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/table/1afe0e1ff62b4109ab6be60f9267a570, entries=2, sequenceid=17, filesize=5.2 K 2024-11-26T10:30:32,380 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 132ms, sequenceid=17, compaction requested=false 2024-11-26T10:30:32,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-26T10:30:32,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:32,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:33,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:33544 deadline: 1732617043576, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. is not online on 94eedbb855cf,46389,1732617006271 2024-11-26T10:30:33,578 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60., hostname=94eedbb855cf,46389,1732617006271, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60., hostname=94eedbb855cf,46389,1732617006271, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. 
is not online on 94eedbb855cf,46389,1732617006271 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-26T10:30:33,578 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60., hostname=94eedbb855cf,46389,1732617006271, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60. is not online on 94eedbb855cf,46389,1732617006271 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-26T10:30:33,578 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732617007201.d718d021ae6be4df36112e62afd41f60., hostname=94eedbb855cf,46389,1732617006271, seqNum=2 from cache 2024-11-26T10:30:33,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:33,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:34,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:34,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:35,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:35,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:36,194 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-26T10:30:36,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:36,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:36,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:36,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:36,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:36,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:36,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:36,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:36,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:36,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:36,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:36,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:36,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:36,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:36,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:36,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:36,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:36,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:36,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:36,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:37,331 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-26T10:30:37,331 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:37,332 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:37,332 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:37,332 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:37,332 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:37,332 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:37,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:37,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:37,352 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:37,352 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:37,353 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:37,353 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:37,353 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:37,353 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:37,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:37,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:37,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:37,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:30:37,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:37,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:38,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:38,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:39,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:39,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:40,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:40,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:41,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:41,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:42,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:42,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:43,650 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., hostname=94eedbb855cf,46389,1732617006271, seqNum=127] 2024-11-26T10:30:43,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:43,660 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49b94c9ac03f0f74a86a3751fc04740a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-26T10:30:43,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/ac6ea7c15edb444186877dbdecb3a74f is 1080, key is row0097/info:/1732617043651/Put/seqid=0 2024-11-26T10:30:43,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741856_1032 (size=12516) 2024-11-26T10:30:43,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741856_1032 (size=12516) 2024-11-26T10:30:43,670 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/ac6ea7c15edb444186877dbdecb3a74f 2024-11-26T10:30:43,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/ac6ea7c15edb444186877dbdecb3a74f as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/ac6ea7c15edb444186877dbdecb3a74f 2024-11-26T10:30:43,680 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/ac6ea7c15edb444186877dbdecb3a74f, entries=7, sequenceid=137, filesize=12.2 K 2024-11-26T10:30:43,681 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 49b94c9ac03f0f74a86a3751fc04740a in 20ms, sequenceid=137, compaction requested=false 2024-11-26T10:30:43,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:30:43,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:43,681 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49b94c9ac03f0f74a86a3751fc04740a 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-26T10:30:43,684 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/bcabaa160fe94d16ad61afe48f2df7eb is 1080, key is row0104/info:/1732617043661/Put/seqid=0 2024-11-26T10:30:43,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741857_1033 (size=20078) 2024-11-26T10:30:43,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741857_1033 (size=20078) 2024-11-26T10:30:43,690 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/bcabaa160fe94d16ad61afe48f2df7eb 2024-11-26T10:30:43,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/bcabaa160fe94d16ad61afe48f2df7eb as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/bcabaa160fe94d16ad61afe48f2df7eb 2024-11-26T10:30:43,700 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/bcabaa160fe94d16ad61afe48f2df7eb, entries=14, sequenceid=154, filesize=19.6 K 2024-11-26T10:30:43,700 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 49b94c9ac03f0f74a86a3751fc04740a in 19ms, sequenceid=154, compaction requested=true 2024-11-26T10:30:43,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:30:43,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49b94c9ac03f0f74a86a3751fc04740a:info, priority=-2147483648, current under compaction store size is 1 
2024-11-26T10:30:43,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:30:43,701 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:30:43,701 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 75675 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:30:43,701 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1541): 49b94c9ac03f0f74a86a3751fc04740a/info is initiating minor compaction (all files) 2024-11-26T10:30:43,702 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49b94c9ac03f0f74a86a3751fc04740a/info in TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a. 2024-11-26T10:30:43,702 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/75145a24fbd04c28987130a694f4fdaf, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/ac6ea7c15edb444186877dbdecb3a74f, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/bcabaa160fe94d16ad61afe48f2df7eb] into tmpdir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp, totalSize=73.9 K 2024-11-26T10:30:43,702 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting 75145a24fbd04c28987130a694f4fdaf, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732617029510 2024-11-26T10:30:43,702 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting ac6ea7c15edb444186877dbdecb3a74f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732617043651 2024-11-26T10:30:43,702 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting bcabaa160fe94d16ad61afe48f2df7eb, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732617043661 2024-11-26T10:30:43,712 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49b94c9ac03f0f74a86a3751fc04740a#info#compaction#72 average throughput is 57.46 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:30:43,713 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/77100680eb6a4b7ca82e39a646841495 is 1080, key is row0062/info:/1732617029510/Put/seqid=0 2024-11-26T10:30:43,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741858_1034 (size=65889) 2024-11-26T10:30:43,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741858_1034 (size=65889) 2024-11-26T10:30:43,741 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/77100680eb6a4b7ca82e39a646841495 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/77100680eb6a4b7ca82e39a646841495 2024-11-26T10:30:43,747 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49b94c9ac03f0f74a86a3751fc04740a/info of 49b94c9ac03f0f74a86a3751fc04740a into 77100680eb6a4b7ca82e39a646841495(size=64.3 K), total size for store is 64.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:30:43,747 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:30:43,747 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., storeName=49b94c9ac03f0f74a86a3751fc04740a/info, priority=13, startTime=1732617043700; duration=0sec 2024-11-26T10:30:43,747 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:30:43,747 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49b94c9ac03f0f74a86a3751fc04740a:info 2024-11-26T10:30:43,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:43,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:44,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:44,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:45,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:45,700 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49b94c9ac03f0f74a86a3751fc04740a 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-26T10:30:45,704 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/25f251085d52422eaa7853c668408e74 is 1080, key is row0118/info:/1732617043682/Put/seqid=0 2024-11-26T10:30:45,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741859_1035 (size=17906) 2024-11-26T10:30:45,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741859_1035 (size=17906) 2024-11-26T10:30:45,711 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/25f251085d52422eaa7853c668408e74 2024-11-26T10:30:45,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/25f251085d52422eaa7853c668408e74 as 
hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/25f251085d52422eaa7853c668408e74 2024-11-26T10:30:45,722 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/25f251085d52422eaa7853c668408e74, entries=12, sequenceid=170, filesize=17.5 K 2024-11-26T10:30:45,722 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=14.71 KB/15064 for 49b94c9ac03f0f74a86a3751fc04740a in 22ms, sequenceid=170, compaction requested=false 2024-11-26T10:30:45,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:30:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:45,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49b94c9ac03f0f74a86a3751fc04740a 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-26T10:30:45,728 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/84a7c4fcec8f436cabd56d056925f406 is 1080, key is row0130/info:/1732617045701/Put/seqid=0 2024-11-26T10:30:45,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741860_1036 (size=22238) 2024-11-26T10:30:45,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741860_1036 (size=22238) 2024-11-26T10:30:45,735 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/84a7c4fcec8f436cabd56d056925f406 2024-11-26T10:30:45,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/84a7c4fcec8f436cabd56d056925f406 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/84a7c4fcec8f436cabd56d056925f406 2024-11-26T10:30:45,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=49b94c9ac03f0f74a86a3751fc04740a, server=94eedbb855cf,46389,1732617006271 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-26T10:30:45,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:33544 deadline: 1732617055743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=49b94c9ac03f0f74a86a3751fc04740a, server=94eedbb855cf,46389,1732617006271 2024-11-26T10:30:45,744 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., hostname=94eedbb855cf,46389,1732617006271, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., hostname=94eedbb855cf,46389,1732617006271, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=49b94c9ac03f0f74a86a3751fc04740a, server=94eedbb855cf,46389,1732617006271 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-26T10:30:45,744 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., hostname=94eedbb855cf,46389,1732617006271, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=49b94c9ac03f0f74a86a3751fc04740a, server=94eedbb855cf,46389,1732617006271 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at 
org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-26T10:30:45,744 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., hostname=94eedbb855cf,46389,1732617006271, seqNum=127 because the exception is null or not the one we care about 2024-11-26T10:30:45,745 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/84a7c4fcec8f436cabd56d056925f406, entries=16, sequenceid=189, filesize=21.7 K 2024-11-26T10:30:45,746 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=13.66 KB/13988 for 49b94c9ac03f0f74a86a3751fc04740a in 22ms, sequenceid=189, compaction requested=true 2024-11-26T10:30:45,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:30:45,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49b94c9ac03f0f74a86a3751fc04740a:info, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:30:45,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:30:45,746 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:30:45,747 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 106033 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:30:45,747 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1541): 49b94c9ac03f0f74a86a3751fc04740a/info is initiating minor compaction (all files) 2024-11-26T10:30:45,747 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49b94c9ac03f0f74a86a3751fc04740a/info in TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a. 
2024-11-26T10:30:45,748 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/77100680eb6a4b7ca82e39a646841495, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/25f251085d52422eaa7853c668408e74, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/84a7c4fcec8f436cabd56d056925f406] into tmpdir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp, totalSize=103.5 K 2024-11-26T10:30:45,748 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting 77100680eb6a4b7ca82e39a646841495, keycount=56, bloomtype=ROW, size=64.3 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732617029510 2024-11-26T10:30:45,748 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting 25f251085d52422eaa7853c668408e74, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732617043682 2024-11-26T10:30:45,749 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting 84a7c4fcec8f436cabd56d056925f406, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1732617045701 2024-11-26T10:30:45,758 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49b94c9ac03f0f74a86a3751fc04740a#info#compaction#75 average throughput is 86.20 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:30:45,759 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/ed079d325ea6414bb7d6e3589366fe8c is 1080, key is row0062/info:/1732617029510/Put/seqid=0 2024-11-26T10:30:45,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741861_1037 (size=96252) 2024-11-26T10:30:45,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741861_1037 (size=96252) 2024-11-26T10:30:45,769 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/ed079d325ea6414bb7d6e3589366fe8c as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/ed079d325ea6414bb7d6e3589366fe8c 2024-11-26T10:30:45,774 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49b94c9ac03f0f74a86a3751fc04740a/info of 49b94c9ac03f0f74a86a3751fc04740a into ed079d325ea6414bb7d6e3589366fe8c(size=94.0 K), total size for store is 94.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:30:45,775 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:30:45,775 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., storeName=49b94c9ac03f0f74a86a3751fc04740a/info, priority=13, startTime=1732617045746; duration=0sec 2024-11-26T10:30:45,775 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:30:45,775 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49b94c9ac03f0f74a86a3751fc04740a:info 2024-11-26T10:30:45,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:45,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:46,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:46,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:47,103 INFO [master/94eedbb855cf:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-26T10:30:47,103 INFO [master/94eedbb855cf:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-26T10:30:47,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:47,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:48,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:48,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:49,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:49,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:50,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:50,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:51,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:51,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:52,048 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-26T10:30:52,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:52,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:53,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:53,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:54,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:54,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:55,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:55,786 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49b94c9ac03f0f74a86a3751fc04740a 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-26T10:30:55,791 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/ecae1deea71f44f989809ffa779f6fe3 is 1080, key is row0146/info:/1732617045725/Put/seqid=0 2024-11-26T10:30:55,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741862_1038 (size=20078) 2024-11-26T10:30:55,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741862_1038 (size=20078) 2024-11-26T10:30:55,796 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/ecae1deea71f44f989809ffa779f6fe3 2024-11-26T10:30:55,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/ecae1deea71f44f989809ffa779f6fe3 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/ecae1deea71f44f989809ffa779f6fe3 2024-11-26T10:30:55,806 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/ecae1deea71f44f989809ffa779f6fe3, entries=14, sequenceid=207, filesize=19.6 K 2024-11-26T10:30:55,807 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=1.05 KB/1076 for 49b94c9ac03f0f74a86a3751fc04740a in 21ms, sequenceid=207, compaction requested=false 2024-11-26T10:30:55,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:30:55,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:55,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:56,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:56,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:57,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:57,797 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49b94c9ac03f0f74a86a3751fc04740a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-26T10:30:57,801 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/70b6a0f8aed848d0bf7ab9255c78798d is 1080, key is row0160/info:/1732617055787/Put/seqid=0 2024-11-26T10:30:57,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741863_1039 (size=12516) 2024-11-26T10:30:57,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741863_1039 (size=12516) 2024-11-26T10:30:57,807 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/70b6a0f8aed848d0bf7ab9255c78798d 2024-11-26T10:30:57,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/70b6a0f8aed848d0bf7ab9255c78798d as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/70b6a0f8aed848d0bf7ab9255c78798d 2024-11-26T10:30:57,817 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/70b6a0f8aed848d0bf7ab9255c78798d, entries=7, sequenceid=217, filesize=12.2 K 2024-11-26T10:30:57,818 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 49b94c9ac03f0f74a86a3751fc04740a in 21ms, sequenceid=217, compaction requested=true 2024-11-26T10:30:57,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:30:57,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49b94c9ac03f0f74a86a3751fc04740a:info, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:30:57,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:30:57,818 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:30:57,819 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128846 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-26T10:30:57,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:57,820 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1541): 49b94c9ac03f0f74a86a3751fc04740a/info is initiating minor compaction (all files) 2024-11-26T10:30:57,820 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49b94c9ac03f0f74a86a3751fc04740a 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-26T10:30:57,820 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49b94c9ac03f0f74a86a3751fc04740a/info in TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a. 2024-11-26T10:30:57,820 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/ed079d325ea6414bb7d6e3589366fe8c, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/ecae1deea71f44f989809ffa779f6fe3, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/70b6a0f8aed848d0bf7ab9255c78798d] into tmpdir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp, totalSize=125.8 K 2024-11-26T10:30:57,820 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting ed079d325ea6414bb7d6e3589366fe8c, keycount=84, bloomtype=ROW, size=94.0 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1732617029510 2024-11-26T10:30:57,820 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting ecae1deea71f44f989809ffa779f6fe3, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732617045725 2024-11-26T10:30:57,821 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting 70b6a0f8aed848d0bf7ab9255c78798d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732617055787 2024-11-26T10:30:57,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/75abdda4cb54477fb466c937adc78a16 is 1080, key is row0167/info:/1732617057798/Put/seqid=0 2024-11-26T10:30:57,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741864_1040 (size=21156) 2024-11-26T10:30:57,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741864_1040 (size=21156) 2024-11-26T10:30:57,837 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=235 (bloomFilter=true), 
to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/75abdda4cb54477fb466c937adc78a16 2024-11-26T10:30:57,840 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49b94c9ac03f0f74a86a3751fc04740a#info#compaction#79 average throughput is 53.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:30:57,840 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/bf4c549b4f0c44788bc07b588058db69 is 1080, key is row0062/info:/1732617029510/Put/seqid=0 2024-11-26T10:30:57,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/75abdda4cb54477fb466c937adc78a16 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/75abdda4cb54477fb466c937adc78a16 2024-11-26T10:30:57,846 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/75abdda4cb54477fb466c937adc78a16, entries=15, sequenceid=235, filesize=20.7 K 2024-11-26T10:30:57,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=11.56 KB/11836 for 49b94c9ac03f0f74a86a3751fc04740a in 28ms, sequenceid=235, compaction requested=false 2024-11-26T10:30:57,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:30:57,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741865_1041 (size=118996) 2024-11-26T10:30:57,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741865_1041 (size=118996) 2024-11-26T10:30:57,854 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/bf4c549b4f0c44788bc07b588058db69 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/bf4c549b4f0c44788bc07b588058db69 2024-11-26T10:30:57,860 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49b94c9ac03f0f74a86a3751fc04740a/info of 49b94c9ac03f0f74a86a3751fc04740a into bf4c549b4f0c44788bc07b588058db69(size=116.2 K), total size for store is 136.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
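Note: the compaction bookkeeping in the entries above can be cross-checked from the logged figures alone. ExploringCompactionPolicy reports 3 selected files of size 128846 bytes, the Compactor lines give the per-file sizes as 94.0 K, 19.6 K and 12.2 K, and 128846 / 1024 ≈ 125.8 K matches the totalSize=125.8 K reported by HStore. The short Java sketch below only re-derives those numbers; the two byte counts are taken from the addStoredBlock entries above, and the class and method names in the sketch are illustrative only, not HBase APIs.

```java
// Re-derive the compaction size bookkeeping from values logged above.
// 20078 and 12516 bytes come from the addStoredBlock entries for
// ecae1deea71f44f989809ffa779f6fe3 and 70b6a0f8aed848d0bf7ab9255c78798d;
// 128846 is the total reported by ExploringCompactionPolicy.
public class CompactionSizeCheck {
    // Format bytes in the "NN.N K" style used by the log output (illustrative).
    private static String kib(long bytes) {
        return String.format("%.1f K", bytes / 1024.0);
    }

    public static void main(String[] args) {
        long total = 128846;             // "3 files of size 128846"
        long ecae  = 20078;              // ecae1deea71f44f989809ffa779f6fe3 -> 19.6 K
        long b70   = 12516;              // 70b6a0f8aed848d0bf7ab9255c78798d -> 12.2 K
        long ed07  = total - ecae - b70; // remainder for ed079d325ea6414bb7d6e3589366fe8c

        System.out.println("ed079d...: " + kib(ed07));  // 94.0 K
        System.out.println("ecae1d...: " + kib(ecae));  // 19.6 K
        System.out.println("70b6a0...: " + kib(b70));   // 12.2 K
        System.out.println("totalSize: " + kib(total)); // 125.8 K
    }
}
```

The same check applies to the later selection of 158058 bytes (116.2 K + 20.7 K + 17.5 K ≈ 154.4 K).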
2024-11-26T10:30:57,860 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:30:57,860 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., storeName=49b94c9ac03f0f74a86a3751fc04740a/info, priority=13, startTime=1732617057818; duration=0sec 2024-11-26T10:30:57,860 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:30:57,860 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49b94c9ac03f0f74a86a3751fc04740a:info 2024-11-26T10:30:57,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:57,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:58,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:58,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:30:59,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:30:59,837 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49b94c9ac03f0f74a86a3751fc04740a 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-26T10:30:59,841 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/c25d53a6ff7c4652993de2446e5d64fd is 1080, key is row0182/info:/1732617057820/Put/seqid=0 2024-11-26T10:30:59,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741866_1042 (size=17906) 2024-11-26T10:30:59,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741866_1042 (size=17906) 2024-11-26T10:30:59,863 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=49b94c9ac03f0f74a86a3751fc04740a, server=94eedbb855cf,46389,1732617006271 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-26T10:30:59,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:33544 deadline: 1732617069863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=49b94c9ac03f0f74a86a3751fc04740a, server=94eedbb855cf,46389,1732617006271 2024-11-26T10:30:59,864 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., hostname=94eedbb855cf,46389,1732617006271, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., hostname=94eedbb855cf,46389,1732617006271, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=49b94c9ac03f0f74a86a3751fc04740a, server=94eedbb855cf,46389,1732617006271 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-26T10:30:59,864 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., hostname=94eedbb855cf,46389,1732617006271, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=49b94c9ac03f0f74a86a3751fc04740a, server=94eedbb855cf,46389,1732617006271 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-26T10:30:59,864 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., hostname=94eedbb855cf,46389,1732617006271, seqNum=127 because the exception is null or not the one we care about 2024-11-26T10:30:59,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:30:59,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:00,248 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/c25d53a6ff7c4652993de2446e5d64fd 2024-11-26T10:31:00,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/c25d53a6ff7c4652993de2446e5d64fd as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/c25d53a6ff7c4652993de2446e5d64fd 2024-11-26T10:31:00,258 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/c25d53a6ff7c4652993de2446e5d64fd, entries=12, sequenceid=251, filesize=17.5 K 2024-11-26T10:31:00,259 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=17.86 KB/18292 for 49b94c9ac03f0f74a86a3751fc04740a in 422ms, sequenceid=251, compaction requested=true 2024-11-26T10:31:00,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:31:00,259 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49b94c9ac03f0f74a86a3751fc04740a:info, 
priority=-2147483648, current under compaction store size is 1 2024-11-26T10:31:00,259 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:31:00,259 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:31:00,260 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 158058 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:31:00,260 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1541): 49b94c9ac03f0f74a86a3751fc04740a/info is initiating minor compaction (all files) 2024-11-26T10:31:00,260 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49b94c9ac03f0f74a86a3751fc04740a/info in TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a. 2024-11-26T10:31:00,260 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/bf4c549b4f0c44788bc07b588058db69, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/75abdda4cb54477fb466c937adc78a16, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/c25d53a6ff7c4652993de2446e5d64fd] into tmpdir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp, totalSize=154.4 K 2024-11-26T10:31:00,261 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting bf4c549b4f0c44788bc07b588058db69, keycount=105, bloomtype=ROW, size=116.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732617029510 2024-11-26T10:31:00,261 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting 75abdda4cb54477fb466c937adc78a16, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732617057798 2024-11-26T10:31:00,261 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting c25d53a6ff7c4652993de2446e5d64fd, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732617057820 2024-11-26T10:31:00,271 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49b94c9ac03f0f74a86a3751fc04740a#info#compaction#81 average throughput is 67.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:31:00,272 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/dbbe4bec68af4b588ce0cb17db9fae73 is 1080, key is row0062/info:/1732617029510/Put/seqid=0 2024-11-26T10:31:00,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741867_1043 (size=148409) 2024-11-26T10:31:00,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741867_1043 (size=148409) 2024-11-26T10:31:00,280 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/dbbe4bec68af4b588ce0cb17db9fae73 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/dbbe4bec68af4b588ce0cb17db9fae73 2024-11-26T10:31:00,285 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49b94c9ac03f0f74a86a3751fc04740a/info of 49b94c9ac03f0f74a86a3751fc04740a into dbbe4bec68af4b588ce0cb17db9fae73(size=144.9 K), total size for store is 144.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:31:00,285 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:31:00,286 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., storeName=49b94c9ac03f0f74a86a3751fc04740a/info, priority=13, startTime=1732617060259; duration=0sec 2024-11-26T10:31:00,286 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:31:00,286 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49b94c9ac03f0f74a86a3751fc04740a:info 2024-11-26T10:31:00,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-26T10:31:00,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-26T10:31:01,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-26T10:31:06,194 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-26T10:31:09,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on 49b94c9ac03f0f74a86a3751fc04740a
2024-11-26T10:31:09,875 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49b94c9ac03f0f74a86a3751fc04740a 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB
2024-11-26T10:31:09,880 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/30559cb5bacb40a591c9039dab77d688 is 1080, key is row0194/info:/1732617059838/Put/seqid=0
2024-11-26T10:31:09,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741868_1044 (size=24409)
2024-11-26T10:31:09,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741868_1044 (size=24409)
2024-11-26T10:31:09,887 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/30559cb5bacb40a591c9039dab77d688
2024-11-26T10:31:09,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/30559cb5bacb40a591c9039dab77d688 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/30559cb5bacb40a591c9039dab77d688
2024-11-26T10:31:09,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=49b94c9ac03f0f74a86a3751fc04740a, server=94eedbb855cf,46389,1732617006271
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-26T10:31:09,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:33544 deadline: 1732617079893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=49b94c9ac03f0f74a86a3751fc04740a, server=94eedbb855cf,46389,1732617006271
2024-11-26T10:31:09,894 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., hostname=94eedbb855cf,46389,1732617006271, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., hostname=94eedbb855cf,46389,1732617006271, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=49b94c9ac03f0f74a86a3751fc04740a, server=94eedbb855cf,46389,1732617006271
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-26T10:31:09,894 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., hostname=94eedbb855cf,46389,1732617006271, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=49b94c9ac03f0f74a86a3751fc04740a, server=94eedbb855cf,46389,1732617006271
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-26T10:31:09,894 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., hostname=94eedbb855cf,46389,1732617006271, seqNum=127 because the exception is null or not the one we care about
2024-11-26T10:31:09,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/30559cb5bacb40a591c9039dab77d688, entries=18, sequenceid=273, filesize=23.8 K
2024-11-26T10:31:09,897 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=11.56 KB/11836 for 49b94c9ac03f0f74a86a3751fc04740a in 22ms, sequenceid=273, compaction requested=false
2024-11-26T10:31:09,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49b94c9ac03f0f74a86a3751fc04740a:
2024-11-26T10:31:09,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-26T10:31:13,679 DEBUG [master/94eedbb855cf:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=68, reuseRatio=88.31%
2024-11-26T10:31:13,679 DEBUG [master/94eedbb855cf:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
2024-11-26T10:31:13,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:14,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:14,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:15,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:15,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:16,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:16,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:17,225 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d5d2b6bf03ac3f8189ad38bcc867036a, had cached 0 bytes from a total of 70862 2024-11-26T10:31:17,246 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 49b94c9ac03f0f74a86a3751fc04740a, had cached 0 bytes from a total of 172818 2024-11-26T10:31:17,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:17,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:31:18,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:18,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:19,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:31:19,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:31:19,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:31:19,995 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49b94c9ac03f0f74a86a3751fc04740a 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-26T10:31:20,000 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/9f065c948e0a479797afbff8c3bd3c37 is 1080, key is row0212/info:/1732617069876/Put/seqid=0 2024-11-26T10:31:20,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741869_1045 (size=17918) 2024-11-26T10:31:20,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741869_1045 (size=17918) 2024-11-26T10:31:20,005 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/9f065c948e0a479797afbff8c3bd3c37 2024-11-26T10:31:20,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/9f065c948e0a479797afbff8c3bd3c37 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/9f065c948e0a479797afbff8c3bd3c37 2024-11-26T10:31:20,015 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/9f065c948e0a479797afbff8c3bd3c37, entries=12, sequenceid=288, filesize=17.5 K 2024-11-26T10:31:20,016 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for 49b94c9ac03f0f74a86a3751fc04740a in 21ms, sequenceid=288, compaction requested=true 2024-11-26T10:31:20,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:31:20,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49b94c9ac03f0f74a86a3751fc04740a:info, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:31:20,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:31:20,016 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:31:20,017 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 190736 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-11-26T10:31:20,017 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1541): 49b94c9ac03f0f74a86a3751fc04740a/info is initiating minor compaction (all files) 2024-11-26T10:31:20,017 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49b94c9ac03f0f74a86a3751fc04740a/info in TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a. 2024-11-26T10:31:20,017 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/dbbe4bec68af4b588ce0cb17db9fae73, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/30559cb5bacb40a591c9039dab77d688, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/9f065c948e0a479797afbff8c3bd3c37] into tmpdir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp, totalSize=186.3 K 2024-11-26T10:31:20,018 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting dbbe4bec68af4b588ce0cb17db9fae73, keycount=132, bloomtype=ROW, size=144.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732617029510 2024-11-26T10:31:20,018 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting 30559cb5bacb40a591c9039dab77d688, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1732617059838 2024-11-26T10:31:20,018 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9f065c948e0a479797afbff8c3bd3c37, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732617069876 2024-11-26T10:31:20,030 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49b94c9ac03f0f74a86a3751fc04740a#info#compaction#84 average throughput is 55.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:31:20,030 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/b9ee31cbe1ad40c8916d3e3a103e18d4 is 1080, key is row0062/info:/1732617029510/Put/seqid=0 2024-11-26T10:31:20,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741870_1046 (size=180886) 2024-11-26T10:31:20,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741870_1046 (size=180886) 2024-11-26T10:31:20,037 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/b9ee31cbe1ad40c8916d3e3a103e18d4 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/b9ee31cbe1ad40c8916d3e3a103e18d4 2024-11-26T10:31:20,042 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49b94c9ac03f0f74a86a3751fc04740a/info of 49b94c9ac03f0f74a86a3751fc04740a into b9ee31cbe1ad40c8916d3e3a103e18d4(size=176.6 K), total size for store is 176.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:31:20,042 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:31:20,042 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., storeName=49b94c9ac03f0f74a86a3751fc04740a/info, priority=13, startTime=1732617080016; duration=0sec 2024-11-26T10:31:20,042 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:31:20,042 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49b94c9ac03f0f74a86a3751fc04740a:info 2024-11-26T10:31:20,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,304 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,304 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,323 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,323 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,323 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,833 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-26T10:31:20,834 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,854 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,854 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,860 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-26T10:31:20,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:20,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:21,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:21,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:22,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:31:22,006 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49b94c9ac03f0f74a86a3751fc04740a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-26T10:31:22,010 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/4ccc356cd4194aacb3a9d8efe9fff7f0 is 1080, key is row0224/info:/1732617079996/Put/seqid=0 2024-11-26T10:31:22,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741871_1047 (size=12523) 2024-11-26T10:31:22,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741871_1047 (size=12523) 2024-11-26T10:31:22,016 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/4ccc356cd4194aacb3a9d8efe9fff7f0 2024-11-26T10:31:22,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/4ccc356cd4194aacb3a9d8efe9fff7f0 as 
hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/4ccc356cd4194aacb3a9d8efe9fff7f0 2024-11-26T10:31:22,026 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/4ccc356cd4194aacb3a9d8efe9fff7f0, entries=7, sequenceid=299, filesize=12.2 K 2024-11-26T10:31:22,027 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 49b94c9ac03f0f74a86a3751fc04740a in 21ms, sequenceid=299, compaction requested=false 2024-11-26T10:31:22,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:31:22,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] regionserver.HRegion(8855): Flush requested on 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:31:22,028 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49b94c9ac03f0f74a86a3751fc04740a 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-26T10:31:22,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/acb31b66e4d84268a22b707a9c4ba11b is 1080, key is row0231/info:/1732617082006/Put/seqid=0 2024-11-26T10:31:22,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741872_1048 (size=20092) 2024-11-26T10:31:22,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741872_1048 (size=20092) 2024-11-26T10:31:22,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/acb31b66e4d84268a22b707a9c4ba11b 2024-11-26T10:31:22,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/acb31b66e4d84268a22b707a9c4ba11b as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/acb31b66e4d84268a22b707a9c4ba11b 2024-11-26T10:31:22,047 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/acb31b66e4d84268a22b707a9c4ba11b, entries=14, sequenceid=316, filesize=19.6 K 2024-11-26T10:31:22,048 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for 49b94c9ac03f0f74a86a3751fc04740a in 21ms, sequenceid=316, compaction requested=true 2024-11-26T10:31:22,048 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:31:22,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49b94c9ac03f0f74a86a3751fc04740a:info, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:31:22,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:31:22,048 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:31:22,049 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 213501 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:31:22,049 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1541): 49b94c9ac03f0f74a86a3751fc04740a/info is initiating minor compaction (all files) 2024-11-26T10:31:22,049 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49b94c9ac03f0f74a86a3751fc04740a/info in TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a. 2024-11-26T10:31:22,049 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/b9ee31cbe1ad40c8916d3e3a103e18d4, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/4ccc356cd4194aacb3a9d8efe9fff7f0, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/acb31b66e4d84268a22b707a9c4ba11b] into tmpdir=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp, totalSize=208.5 K 2024-11-26T10:31:22,050 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting b9ee31cbe1ad40c8916d3e3a103e18d4, keycount=162, bloomtype=ROW, size=176.6 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732617029510 2024-11-26T10:31:22,050 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4ccc356cd4194aacb3a9d8efe9fff7f0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732617079996 2024-11-26T10:31:22,050 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] compactions.Compactor(225): Compacting acb31b66e4d84268a22b707a9c4ba11b, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732617082006 2024-11-26T10:31:22,060 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49b94c9ac03f0f74a86a3751fc04740a#info#compaction#87 average throughput is 93.89 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:31:22,061 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/589f96b31dca4935ab469e205d70c0db is 1080, key is row0062/info:/1732617029510/Put/seqid=0 2024-11-26T10:31:22,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741873_1049 (size=203655) 2024-11-26T10:31:22,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741873_1049 (size=203655) 2024-11-26T10:31:22,068 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/589f96b31dca4935ab469e205d70c0db as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/589f96b31dca4935ab469e205d70c0db 2024-11-26T10:31:22,074 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49b94c9ac03f0f74a86a3751fc04740a/info of 49b94c9ac03f0f74a86a3751fc04740a into 589f96b31dca4935ab469e205d70c0db(size=198.9 K), total size for store is 198.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:31:22,074 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:31:22,074 INFO [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a., storeName=49b94c9ac03f0f74a86a3751fc04740a/info, priority=13, startTime=1732617082048; duration=0sec 2024-11-26T10:31:22,074 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:31:22,074 DEBUG [RS:0;94eedbb855cf:46389-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49b94c9ac03f0f74a86a3751fc04740a:info 2024-11-26T10:31:22,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:22,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:23,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:23,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:24,045 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-26T10:31:24,046 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C46389%2C1732617006271.1732617084046 2024-11-26T10:31:24,062 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,062 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,062 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,062 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,062 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,063 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/WALs/94eedbb855cf,46389,1732617006271/94eedbb855cf%2C46389%2C1732617006271.1732617006674 with entries=310, filesize=307.89 KB; new WAL /user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/WALs/94eedbb855cf,46389,1732617006271/94eedbb855cf%2C46389%2C1732617006271.1732617084046 2024-11-26T10:31:24,063 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42675:42675),(127.0.0.1/127.0.0.1:44661:44661)] 2024-11-26T10:31:24,063 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/WALs/94eedbb855cf,46389,1732617006271/94eedbb855cf%2C46389%2C1732617006271.1732617006674 is not closed yet, will try archiving it next time 2024-11-26T10:31:24,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741833_1009 (size=315283) 2024-11-26T10:31:24,064 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741833_1009 (size=315283) 2024-11-26T10:31:24,067 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for d5d2b6bf03ac3f8189ad38bcc867036a: 2024-11-26T10:31:24,067 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-26T10:31:24,071 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/.tmp/info/f747dd79ba334ec58fdb9983e1b78b06 is 193, key is TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a./info:regioninfo/1732617032290/Put/seqid=0 2024-11-26T10:31:24,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741875_1051 (size=6223) 2024-11-26T10:31:24,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741875_1051 (size=6223) 2024-11-26T10:31:24,075 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/.tmp/info/f747dd79ba334ec58fdb9983e1b78b06 2024-11-26T10:31:24,080 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/.tmp/info/f747dd79ba334ec58fdb9983e1b78b06 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/info/f747dd79ba334ec58fdb9983e1b78b06 2024-11-26T10:31:24,084 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/info/f747dd79ba334ec58fdb9983e1b78b06, entries=5, sequenceid=21, filesize=6.1 K 2024-11-26T10:31:24,085 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 18ms, sequenceid=21, compaction requested=false 2024-11-26T10:31:24,085 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-26T10:31:24,085 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 49b94c9ac03f0f74a86a3751fc04740a 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-26T10:31:24,089 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/6c67f1c64df14c63a1b70f9c660a390b is 1080, key is row0245/info:/1732617082028/Put/seqid=0 2024-11-26T10:31:24,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741876_1052 (size=17918) 2024-11-26T10:31:24,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741876_1052 (size=17918) 2024-11-26T10:31:24,093 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB 
at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/6c67f1c64df14c63a1b70f9c660a390b 2024-11-26T10:31:24,098 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/.tmp/info/6c67f1c64df14c63a1b70f9c660a390b as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/6c67f1c64df14c63a1b70f9c660a390b 2024-11-26T10:31:24,102 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/6c67f1c64df14c63a1b70f9c660a390b, entries=12, sequenceid=332, filesize=17.5 K 2024-11-26T10:31:24,102 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=0 B/0 for 49b94c9ac03f0f74a86a3751fc04740a in 17ms, sequenceid=332, compaction requested=false 2024-11-26T10:31:24,102 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 49b94c9ac03f0f74a86a3751fc04740a: 2024-11-26T10:31:24,103 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C46389%2C1732617006271.1732617084103 2024-11-26T10:31:24,107 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,107 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,107 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,107 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,107 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,108 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/WALs/94eedbb855cf,46389,1732617006271/94eedbb855cf%2C46389%2C1732617006271.1732617084046 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/WALs/94eedbb855cf,46389,1732617006271/94eedbb855cf%2C46389%2C1732617006271.1732617084103 2024-11-26T10:31:24,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741874_1050 (size=731) 2024-11-26T10:31:24,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741874_1050 (size=731) 2024-11-26T10:31:24,110 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44661:44661),(127.0.0.1/127.0.0.1:42675:42675)] 2024-11-26T10:31:24,110 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/WALs/94eedbb855cf,46389,1732617006271/94eedbb855cf%2C46389%2C1732617006271.1732617006674 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/oldWALs/94eedbb855cf%2C46389%2C1732617006271.1732617006674 2024-11-26T10:31:24,111 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 
2024-11-26T10:31:24,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-26T10:31:24,111 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-26T10:31:24,111 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:31:24,111 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:31:24,111 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/WALs/94eedbb855cf,46389,1732617006271/94eedbb855cf%2C46389%2C1732617006271.1732617084046 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/oldWALs/94eedbb855cf%2C46389%2C1732617006271.1732617084046 2024-11-26T10:31:24,111 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:31:24,111 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-26T10:31:24,112 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-26T10:31:24,112 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1103943411, stopped=false 2024-11-26T10:31:24,112 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=94eedbb855cf,34641,1732617006212 2024-11-26T10:31:24,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-26T10:31:24,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-26T10:31:24,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:31:24,113 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-26T10:31:24,114 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-26T10:31:24,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:31:24,114 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:31:24,114 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:31:24,114 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '94eedbb855cf,46389,1732617006271' ***** 2024-11-26T10:31:24,114 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-26T10:31:24,115 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:31:24,115 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:31:24,115 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-26T10:31:24,115 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-26T10:31:24,115 INFO [RS:0;94eedbb855cf:46389 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-26T10:31:24,115 INFO [RS:0;94eedbb855cf:46389 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-26T10:31:24,115 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.HRegionServer(3091): Received CLOSE for d5d2b6bf03ac3f8189ad38bcc867036a 2024-11-26T10:31:24,115 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.HRegionServer(3091): Received CLOSE for 49b94c9ac03f0f74a86a3751fc04740a 2024-11-26T10:31:24,115 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.HRegionServer(959): stopping server 94eedbb855cf,46389,1732617006271 2024-11-26T10:31:24,115 INFO [RS:0;94eedbb855cf:46389 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-26T10:31:24,115 INFO [RS:0;94eedbb855cf:46389 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;94eedbb855cf:46389. 
2024-11-26T10:31:24,115 DEBUG [RS:0;94eedbb855cf:46389 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:31:24,115 DEBUG [RS:0;94eedbb855cf:46389 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:31:24,115 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d5d2b6bf03ac3f8189ad38bcc867036a, disabling compactions & flushes 2024-11-26T10:31:24,115 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a. 2024-11-26T10:31:24,115 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-26T10:31:24,115 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-26T10:31:24,115 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-26T10:31:24,115 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a. 2024-11-26T10:31:24,116 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-26T10:31:24,116 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a. after waiting 0 ms 2024-11-26T10:31:24,116 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a. 
2024-11-26T10:31:24,116 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-26T10:31:24,116 DEBUG [RS:0;94eedbb855cf:46389 {}] regionserver.HRegionServer(1325): Online Regions={d5d2b6bf03ac3f8189ad38bcc867036a=TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a., 1588230740=hbase:meta,,1.1588230740, 49b94c9ac03f0f74a86a3751fc04740a=TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.} 2024-11-26T10:31:24,116 DEBUG [RS:0;94eedbb855cf:46389 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 49b94c9ac03f0f74a86a3751fc04740a, d5d2b6bf03ac3f8189ad38bcc867036a 2024-11-26T10:31:24,116 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-26T10:31:24,116 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-26T10:31:24,116 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-26T10:31:24,116 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-26T10:31:24,116 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-26T10:31:24,116 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d5d2b6bf03ac3f8189ad38bcc867036a/info/2212a186639b4a5fb403255fc175689b.d718d021ae6be4df36112e62afd41f60->hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/2212a186639b4a5fb403255fc175689b-bottom] to archive 2024-11-26T10:31:24,117 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-26T10:31:24,119 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d5d2b6bf03ac3f8189ad38bcc867036a/info/2212a186639b4a5fb403255fc175689b.d718d021ae6be4df36112e62afd41f60 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/d5d2b6bf03ac3f8189ad38bcc867036a/info/2212a186639b4a5fb403255fc175689b.d718d021ae6be4df36112e62afd41f60 2024-11-26T10:31:24,119 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=94eedbb855cf:34641 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-26T10:31:24,120 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-26T10:31:24,121 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-26T10:31:24,121 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-26T10:31:24,121 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-26T10:31:24,121 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732617084116Running coprocessor pre-close hooks at 1732617084116Disabling compacts and flushes for region at 1732617084116Disabling writes for close at 1732617084116Writing region close event to WAL at 1732617084118 (+2 ms)Running coprocessor post-close hooks at 1732617084121 (+3 ms)Closed at 1732617084121 2024-11-26T10:31:24,121 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-26T10:31:24,123 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d5d2b6bf03ac3f8189ad38bcc867036a/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-26T10:31:24,123 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a. 2024-11-26T10:31:24,123 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d5d2b6bf03ac3f8189ad38bcc867036a: Waiting for close lock at 1732617084115Running coprocessor pre-close hooks at 1732617084115Disabling compacts and flushes for region at 1732617084115Disabling writes for close at 1732617084116 (+1 ms)Writing region close event to WAL at 1732617084120 (+4 ms)Running coprocessor post-close hooks at 1732617084123 (+3 ms)Closed at 1732617084123 2024-11-26T10:31:24,123 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732617031585.d5d2b6bf03ac3f8189ad38bcc867036a. 
2024-11-26T10:31:24,123 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 49b94c9ac03f0f74a86a3751fc04740a, disabling compactions & flushes 2024-11-26T10:31:24,123 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a. 2024-11-26T10:31:24,124 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a. 2024-11-26T10:31:24,124 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a. after waiting 0 ms 2024-11-26T10:31:24,124 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a. 2024-11-26T10:31:24,124 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/2212a186639b4a5fb403255fc175689b.d718d021ae6be4df36112e62afd41f60->hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/d718d021ae6be4df36112e62afd41f60/info/2212a186639b4a5fb403255fc175689b-top, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-47d42f898b51434b88fdd084c2a0f712, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-3fe02274e3fd4105a4fd09c6695a1201, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/75145a24fbd04c28987130a694f4fdaf, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-736116411f334238ac38fc3bcba24bda, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/ac6ea7c15edb444186877dbdecb3a74f, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/77100680eb6a4b7ca82e39a646841495, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/bcabaa160fe94d16ad61afe48f2df7eb, 
hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/25f251085d52422eaa7853c668408e74, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/ed079d325ea6414bb7d6e3589366fe8c, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/84a7c4fcec8f436cabd56d056925f406, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/ecae1deea71f44f989809ffa779f6fe3, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/bf4c549b4f0c44788bc07b588058db69, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/70b6a0f8aed848d0bf7ab9255c78798d, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/75abdda4cb54477fb466c937adc78a16, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/dbbe4bec68af4b588ce0cb17db9fae73, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/c25d53a6ff7c4652993de2446e5d64fd, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/30559cb5bacb40a591c9039dab77d688, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/b9ee31cbe1ad40c8916d3e3a103e18d4, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/9f065c948e0a479797afbff8c3bd3c37, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/4ccc356cd4194aacb3a9d8efe9fff7f0, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/acb31b66e4d84268a22b707a9c4ba11b] to archive 2024-11-26T10:31:24,125 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-26T10:31:24,126 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/2212a186639b4a5fb403255fc175689b.d718d021ae6be4df36112e62afd41f60 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/2212a186639b4a5fb403255fc175689b.d718d021ae6be4df36112e62afd41f60 2024-11-26T10:31:24,127 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-47d42f898b51434b88fdd084c2a0f712 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-47d42f898b51434b88fdd084c2a0f712 2024-11-26T10:31:24,129 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-3fe02274e3fd4105a4fd09c6695a1201 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-3fe02274e3fd4105a4fd09c6695a1201 2024-11-26T10:31:24,130 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/75145a24fbd04c28987130a694f4fdaf to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/75145a24fbd04c28987130a694f4fdaf 2024-11-26T10:31:24,131 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-736116411f334238ac38fc3bcba24bda to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/TestLogRolling-testLogRolling=d718d021ae6be4df36112e62afd41f60-736116411f334238ac38fc3bcba24bda 2024-11-26T10:31:24,132 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/ac6ea7c15edb444186877dbdecb3a74f to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/ac6ea7c15edb444186877dbdecb3a74f 2024-11-26T10:31:24,133 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/77100680eb6a4b7ca82e39a646841495 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/77100680eb6a4b7ca82e39a646841495 2024-11-26T10:31:24,134 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/bcabaa160fe94d16ad61afe48f2df7eb to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/bcabaa160fe94d16ad61afe48f2df7eb 2024-11-26T10:31:24,135 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/25f251085d52422eaa7853c668408e74 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/25f251085d52422eaa7853c668408e74 2024-11-26T10:31:24,136 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/ed079d325ea6414bb7d6e3589366fe8c to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/ed079d325ea6414bb7d6e3589366fe8c 2024-11-26T10:31:24,137 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/84a7c4fcec8f436cabd56d056925f406 to 
hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/84a7c4fcec8f436cabd56d056925f406 2024-11-26T10:31:24,138 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/ecae1deea71f44f989809ffa779f6fe3 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/ecae1deea71f44f989809ffa779f6fe3 2024-11-26T10:31:24,139 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/bf4c549b4f0c44788bc07b588058db69 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/bf4c549b4f0c44788bc07b588058db69 2024-11-26T10:31:24,140 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/70b6a0f8aed848d0bf7ab9255c78798d to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/70b6a0f8aed848d0bf7ab9255c78798d 2024-11-26T10:31:24,141 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/75abdda4cb54477fb466c937adc78a16 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/75abdda4cb54477fb466c937adc78a16 2024-11-26T10:31:24,142 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/dbbe4bec68af4b588ce0cb17db9fae73 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/dbbe4bec68af4b588ce0cb17db9fae73 2024-11-26T10:31:24,143 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/c25d53a6ff7c4652993de2446e5d64fd to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/c25d53a6ff7c4652993de2446e5d64fd 2024-11-26T10:31:24,144 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/30559cb5bacb40a591c9039dab77d688 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/30559cb5bacb40a591c9039dab77d688 2024-11-26T10:31:24,145 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/b9ee31cbe1ad40c8916d3e3a103e18d4 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/b9ee31cbe1ad40c8916d3e3a103e18d4 2024-11-26T10:31:24,146 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/9f065c948e0a479797afbff8c3bd3c37 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/9f065c948e0a479797afbff8c3bd3c37 2024-11-26T10:31:24,147 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/4ccc356cd4194aacb3a9d8efe9fff7f0 to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/4ccc356cd4194aacb3a9d8efe9fff7f0 2024-11-26T10:31:24,148 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/acb31b66e4d84268a22b707a9c4ba11b to hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/archive/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/info/acb31b66e4d84268a22b707a9c4ba11b 2024-11-26T10:31:24,149 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a.-1 {}] 
regionserver.HStore(2414): Failed to report archival of files: [75145a24fbd04c28987130a694f4fdaf=43081, ac6ea7c15edb444186877dbdecb3a74f=12516, 77100680eb6a4b7ca82e39a646841495=65889, bcabaa160fe94d16ad61afe48f2df7eb=20078, 25f251085d52422eaa7853c668408e74=17906, ed079d325ea6414bb7d6e3589366fe8c=96252, 84a7c4fcec8f436cabd56d056925f406=22238, ecae1deea71f44f989809ffa779f6fe3=20078, bf4c549b4f0c44788bc07b588058db69=118996, 70b6a0f8aed848d0bf7ab9255c78798d=12516, 75abdda4cb54477fb466c937adc78a16=21156, dbbe4bec68af4b588ce0cb17db9fae73=148409, c25d53a6ff7c4652993de2446e5d64fd=17906, 30559cb5bacb40a591c9039dab77d688=24409, b9ee31cbe1ad40c8916d3e3a103e18d4=180886, 9f065c948e0a479797afbff8c3bd3c37=17918, 4ccc356cd4194aacb3a9d8efe9fff7f0=12523, acb31b66e4d84268a22b707a9c4ba11b=20092] 2024-11-26T10:31:24,152 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/data/default/TestLogRolling-testLogRolling/49b94c9ac03f0f74a86a3751fc04740a/recovered.edits/335.seqid, newMaxSeqId=335, maxSeqId=126 2024-11-26T10:31:24,152 INFO [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a. 2024-11-26T10:31:24,152 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 49b94c9ac03f0f74a86a3751fc04740a: Waiting for close lock at 1732617084123Running coprocessor pre-close hooks at 1732617084123Disabling compacts and flushes for region at 1732617084123Disabling writes for close at 1732617084124 (+1 ms)Writing region close event to WAL at 1732617084149 (+25 ms)Running coprocessor post-close hooks at 1732617084152 (+3 ms)Closed at 1732617084152 2024-11-26T10:31:24,152 DEBUG [RS_CLOSE_REGION-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732617031585.49b94c9ac03f0f74a86a3751fc04740a. 2024-11-26T10:31:24,316 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.HRegionServer(976): stopping server 94eedbb855cf,46389,1732617006271; all regions closed. 
2024-11-26T10:31:24,316 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,317 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,317 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,317 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,317 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741834_1010 (size=8107) 2024-11-26T10:31:24,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741834_1010 (size=8107) 2024-11-26T10:31:24,321 DEBUG [RS:0;94eedbb855cf:46389 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/oldWALs 2024-11-26T10:31:24,321 INFO [RS:0;94eedbb855cf:46389 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 94eedbb855cf%2C46389%2C1732617006271.meta:.meta(num 1732617007037) 2024-11-26T10:31:24,321 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,321 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,321 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,321 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,321 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741877_1053 (size=780) 2024-11-26T10:31:24,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741877_1053 (size=780) 2024-11-26T10:31:24,325 DEBUG [RS:0;94eedbb855cf:46389 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/oldWALs 2024-11-26T10:31:24,325 INFO [RS:0;94eedbb855cf:46389 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 94eedbb855cf%2C46389%2C1732617006271:(num 1732617084103) 2024-11-26T10:31:24,325 DEBUG [RS:0;94eedbb855cf:46389 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:31:24,325 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.LeaseManager(133): Closed leases 2024-11-26T10:31:24,325 INFO [RS:0;94eedbb855cf:46389 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-26T10:31:24,325 INFO [RS:0;94eedbb855cf:46389 {}] hbase.ChoreService(370): Chore service for: regionserver/94eedbb855cf:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-26T10:31:24,326 INFO [RS:0;94eedbb855cf:46389 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-26T10:31:24,326 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-26T10:31:24,326 INFO [RS:0;94eedbb855cf:46389 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46389 2024-11-26T10:31:24,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-26T10:31:24,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/94eedbb855cf,46389,1732617006271 2024-11-26T10:31:24,328 INFO [RS:0;94eedbb855cf:46389 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-26T10:31:24,329 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [94eedbb855cf,46389,1732617006271] 2024-11-26T10:31:24,331 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/94eedbb855cf,46389,1732617006271 already deleted, retry=false 2024-11-26T10:31:24,331 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 94eedbb855cf,46389,1732617006271 expired; onlineServers=0 2024-11-26T10:31:24,331 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '94eedbb855cf,34641,1732617006212' ***** 2024-11-26T10:31:24,331 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-26T10:31:24,332 INFO [M:0;94eedbb855cf:34641 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-26T10:31:24,332 INFO [M:0;94eedbb855cf:34641 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-26T10:31:24,332 DEBUG [M:0;94eedbb855cf:34641 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-26T10:31:24,332 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-26T10:31:24,332 DEBUG [M:0;94eedbb855cf:34641 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-26T10:31:24,332 DEBUG [master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732617006432 {}] cleaner.HFileCleaner(306): Exit Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732617006432,5,FailOnTimeoutGroup] 2024-11-26T10:31:24,332 DEBUG [master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732617006429 {}] cleaner.HFileCleaner(306): Exit Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732617006429,5,FailOnTimeoutGroup] 2024-11-26T10:31:24,332 INFO [M:0;94eedbb855cf:34641 {}] hbase.ChoreService(370): Chore service for: master/94eedbb855cf:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-26T10:31:24,332 INFO [M:0;94eedbb855cf:34641 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-26T10:31:24,332 DEBUG [M:0;94eedbb855cf:34641 {}] master.HMaster(1795): Stopping service threads 2024-11-26T10:31:24,332 INFO [M:0;94eedbb855cf:34641 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-26T10:31:24,332 INFO [M:0;94eedbb855cf:34641 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-26T10:31:24,332 INFO [M:0;94eedbb855cf:34641 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-26T10:31:24,333 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-26T10:31:24,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-26T10:31:24,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:31:24,333 DEBUG [M:0;94eedbb855cf:34641 {}] zookeeper.ZKUtil(347): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-26T10:31:24,333 WARN [M:0;94eedbb855cf:34641 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-26T10:31:24,334 INFO [M:0;94eedbb855cf:34641 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/.lastflushedseqids 2024-11-26T10:31:24,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741878_1054 (size=228) 2024-11-26T10:31:24,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741878_1054 (size=228) 2024-11-26T10:31:24,339 INFO [M:0;94eedbb855cf:34641 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-26T10:31:24,339 INFO [M:0;94eedbb855cf:34641 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-26T10:31:24,340 DEBUG [M:0;94eedbb855cf:34641 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-26T10:31:24,340 INFO [M:0;94eedbb855cf:34641 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:31:24,340 DEBUG [M:0;94eedbb855cf:34641 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:31:24,340 DEBUG [M:0;94eedbb855cf:34641 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-26T10:31:24,340 DEBUG [M:0;94eedbb855cf:34641 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:31:24,340 INFO [M:0;94eedbb855cf:34641 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=53.68 KB heapSize=65.90 KB 2024-11-26T10:31:24,355 DEBUG [M:0;94eedbb855cf:34641 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ef3665d5f34242bb83419840594e6cf3 is 82, key is hbase:meta,,1/info:regioninfo/1732617007065/Put/seqid=0 2024-11-26T10:31:24,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741879_1055 (size=5672) 2024-11-26T10:31:24,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741879_1055 (size=5672) 2024-11-26T10:31:24,360 INFO [M:0;94eedbb855cf:34641 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ef3665d5f34242bb83419840594e6cf3 2024-11-26T10:31:24,383 DEBUG [M:0;94eedbb855cf:34641 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/565f97c1d256418f80dcc54a566c03a5 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732617007570/Put/seqid=0 2024-11-26T10:31:24,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741880_1056 (size=7679) 2024-11-26T10:31:24,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741880_1056 (size=7679) 2024-11-26T10:31:24,388 INFO [M:0;94eedbb855cf:34641 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.08 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/565f97c1d256418f80dcc54a566c03a5 2024-11-26T10:31:24,393 INFO [M:0;94eedbb855cf:34641 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 565f97c1d256418f80dcc54a566c03a5 2024-11-26T10:31:24,406 DEBUG [M:0;94eedbb855cf:34641 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/be5bb60c358a4858892d1d57fa5042eb is 69, key is 94eedbb855cf,46389,1732617006271/rs:state/1732617006506/Put/seqid=0 2024-11-26T10:31:24,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741881_1057 (size=5156) 2024-11-26T10:31:24,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741881_1057 (size=5156) 2024-11-26T10:31:24,411 INFO [M:0;94eedbb855cf:34641 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/be5bb60c358a4858892d1d57fa5042eb 2024-11-26T10:31:24,428 DEBUG [M:0;94eedbb855cf:34641 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/105ad67720a84f959d4d9dda4e7a9c51 is 52, key is load_balancer_on/state:d/1732617007197/Put/seqid=0 2024-11-26T10:31:24,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:31:24,430 INFO [RS:0;94eedbb855cf:46389 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-26T10:31:24,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46389-0x10153d357040001, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:31:24,430 INFO [RS:0;94eedbb855cf:46389 {}] regionserver.HRegionServer(1031): Exiting; stopping=94eedbb855cf,46389,1732617006271; zookeeper connection closed. 
2024-11-26T10:31:24,430 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@62723a8b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@62723a8b 2024-11-26T10:31:24,430 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-26T10:31:24,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741882_1058 (size=5056) 2024-11-26T10:31:24,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741882_1058 (size=5056) 2024-11-26T10:31:24,433 INFO [M:0;94eedbb855cf:34641 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/105ad67720a84f959d4d9dda4e7a9c51 2024-11-26T10:31:24,437 DEBUG [M:0;94eedbb855cf:34641 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ef3665d5f34242bb83419840594e6cf3 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ef3665d5f34242bb83419840594e6cf3 2024-11-26T10:31:24,441 INFO [M:0;94eedbb855cf:34641 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ef3665d5f34242bb83419840594e6cf3, entries=8, sequenceid=129, filesize=5.5 K 2024-11-26T10:31:24,442 DEBUG [M:0;94eedbb855cf:34641 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/565f97c1d256418f80dcc54a566c03a5 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/565f97c1d256418f80dcc54a566c03a5 2024-11-26T10:31:24,446 INFO [M:0;94eedbb855cf:34641 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 565f97c1d256418f80dcc54a566c03a5 2024-11-26T10:31:24,446 INFO [M:0;94eedbb855cf:34641 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/565f97c1d256418f80dcc54a566c03a5, entries=14, sequenceid=129, filesize=7.5 K 2024-11-26T10:31:24,446 DEBUG [M:0;94eedbb855cf:34641 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/be5bb60c358a4858892d1d57fa5042eb as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/be5bb60c358a4858892d1d57fa5042eb 2024-11-26T10:31:24,450 INFO [M:0;94eedbb855cf:34641 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/be5bb60c358a4858892d1d57fa5042eb, entries=1, sequenceid=129, filesize=5.0 K 2024-11-26T10:31:24,451 DEBUG [M:0;94eedbb855cf:34641 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/105ad67720a84f959d4d9dda4e7a9c51 as hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/105ad67720a84f959d4d9dda4e7a9c51 2024-11-26T10:31:24,454 INFO [M:0;94eedbb855cf:34641 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32975/user/jenkins/test-data/d77a6f70-46d3-1e91-91e5-994d10c3b6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/105ad67720a84f959d4d9dda4e7a9c51, entries=1, sequenceid=129, filesize=4.9 K 2024-11-26T10:31:24,455 INFO [M:0;94eedbb855cf:34641 {}] regionserver.HRegion(3140): Finished flush of dataSize ~53.68 KB/54973, heapSize ~65.84 KB/67416, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=129, compaction requested=false 2024-11-26T10:31:24,456 INFO [M:0;94eedbb855cf:34641 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:31:24,456 DEBUG [M:0;94eedbb855cf:34641 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732617084339Disabling compacts and flushes for region at 1732617084339Disabling writes for close at 1732617084340 (+1 ms)Obtaining lock to block concurrent updates at 1732617084340Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732617084340Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=54973, getHeapSize=67416, getOffHeapSize=0, getCellsCount=152 at 1732617084340Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732617084341 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732617084341Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732617084355 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732617084355Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732617084365 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732617084383 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732617084383Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732617084393 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732617084406 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732617084406Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732617084415 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732617084428 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732617084428Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68ffbb87: reopening flushed file at 1732617084437 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@21272ba6: reopening flushed file at 1732617084441 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@578dffc8: reopening flushed file at 1732617084446 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f8791bf: reopening flushed file at 1732617084450 (+4 ms)Finished flush of dataSize ~53.68 KB/54973, heapSize ~65.84 KB/67416, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=129, compaction requested=false at 1732617084455 (+5 ms)Writing region close event to WAL at 1732617084456 (+1 ms)Closed at 1732617084456 2024-11-26T10:31:24,457 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,457 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,457 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,457 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,457 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:24,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33803 is added to blk_1073741830_1006 (size=63903) 2024-11-26T10:31:24,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741830_1006 (size=63903) 2024-11-26T10:31:24,460 INFO [M:0;94eedbb855cf:34641 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-26T10:31:24,460 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-26T10:31:24,460 INFO [M:0;94eedbb855cf:34641 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34641 2024-11-26T10:31:24,460 INFO [M:0;94eedbb855cf:34641 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-26T10:31:24,528 INFO [regionserver/94eedbb855cf:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-26T10:31:24,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:31:24,562 INFO [M:0;94eedbb855cf:34641 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-26T10:31:24,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34641-0x10153d357040000, quorum=127.0.0.1:58623, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:31:24,564 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@18d1ee92{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:31:24,565 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78512cf7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:31:24,565 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:31:24,565 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41a74ab6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:31:24,565 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64d2170c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/hadoop.log.dir/,STOPPED} 2024-11-26T10:31:24,566 WARN [BP-493305062-172.17.0.2-1732617005522 heartbeating to localhost/127.0.0.1:32975 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:31:24,566 WARN [BP-493305062-172.17.0.2-1732617005522 heartbeating to localhost/127.0.0.1:32975 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-493305062-172.17.0.2-1732617005522 (Datanode Uuid be912820-3b3c-4149-a752-c348069613fa) service to localhost/127.0.0.1:32975 2024-11-26T10:31:24,566 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-26T10:31:24,566 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:31:24,567 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/cluster_aa9225f6-ad12-976e-8571-d24e28864d36/data/data3/current/BP-493305062-172.17.0.2-1732617005522 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:31:24,567 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/cluster_aa9225f6-ad12-976e-8571-d24e28864d36/data/data4/current/BP-493305062-172.17.0.2-1732617005522 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:31:24,567 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:31:24,570 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@444d0b71{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:31:24,570 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3bc081d8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:31:24,570 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:31:24,570 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ecf816b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:31:24,570 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@374d3611{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/hadoop.log.dir/,STOPPED} 2024-11-26T10:31:24,571 WARN [BP-493305062-172.17.0.2-1732617005522 heartbeating to localhost/127.0.0.1:32975 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:31:24,571 WARN [BP-493305062-172.17.0.2-1732617005522 heartbeating to localhost/127.0.0.1:32975 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-493305062-172.17.0.2-1732617005522 (Datanode Uuid d5c2a637-0358-4490-8c1b-249e2208c819) service to localhost/127.0.0.1:32975 2024-11-26T10:31:24,571 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-26T10:31:24,571 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:31:24,572 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/cluster_aa9225f6-ad12-976e-8571-d24e28864d36/data/data1/current/BP-493305062-172.17.0.2-1732617005522 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:31:24,572 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/cluster_aa9225f6-ad12-976e-8571-d24e28864d36/data/data2/current/BP-493305062-172.17.0.2-1732617005522 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:31:24,572 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:31:24,578 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@319ddf98{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-26T10:31:24,578 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@371dfe16{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:31:24,578 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:31:24,578 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a0fc9ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:31:24,578 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35e03861{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/hadoop.log.dir/,STOPPED} 2024-11-26T10:31:24,585 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-26T10:31:24,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-26T10:31:24,624 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=228 (was 209) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32975 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:32975 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:32975 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:32975 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32975 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32975 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:32975 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:32975 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=509 (was 486) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=51 (was 86), ProcessCount=11 (was 11), AvailableMemoryMB=6034 (was 6114) 2024-11-26T10:31:24,632 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=228, OpenFileDescriptor=509, MaxFileDescriptor=1048576, SystemLoadAverage=51, ProcessCount=11, AvailableMemoryMB=6034 2024-11-26T10:31:24,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-26T10:31:24,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/hadoop.log.dir so I do NOT create it in target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13 2024-11-26T10:31:24,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a215d4-0a7b-6260-7ac6-20fd1da6c15f/hadoop.tmp.dir so I do NOT create it in target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13 2024-11-26T10:31:24,632 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/cluster_4d7273ce-eed7-8952-0866-bbd49f72f0b2, deleteOnExit=true 2024-11-26T10:31:24,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 
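The StartMiniClusterOption line above shows the shape of cluster this test asked HBaseTestingUtil for: one master, one region server, two datanodes and a single ZooKeeper server, with no pre-created root or WAL directories. A minimal sketch of that request, assuming the StartMiniClusterOption builder exposes methods matching the fields printed in the log (numMasters, numRegionServers, numDataNodes, numZkServers, createRootDir, createWALDir), would look roughly like:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Mirrors the option dump above: 1 master, 1 region server, 2 datanodes, 1 ZK server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .createRootDir(false)
            .createWALDir(false)
            .build();
        util.startMiniCluster(option);
        try {
          // test body would run against the mini cluster here
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }

Everything that follows in the log (STARTING DFS, the Jetty HTTP endpoints, the mini ZooKeeper cluster, then master and region server) is that single call spinning up its pieces.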
2024-11-26T10:31:24,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/test.cache.data in system properties and HBase conf 2024-11-26T10:31:24,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/hadoop.tmp.dir in system properties and HBase conf 2024-11-26T10:31:24,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/hadoop.log.dir in system properties and HBase conf 2024-11-26T10:31:24,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-26T10:31:24,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-26T10:31:24,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-26T10:31:24,633 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-26T10:31:24,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-26T10:31:24,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-26T10:31:24,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-26T10:31:24,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-26T10:31:24,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-26T10:31:24,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-26T10:31:24,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-26T10:31:24,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-26T10:31:24,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-26T10:31:24,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/nfs.dump.dir in system properties and HBase conf 2024-11-26T10:31:24,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/java.io.tmpdir in system properties and HBase conf 2024-11-26T10:31:24,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-26T10:31:24,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-26T10:31:24,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-26T10:31:24,646 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-26T10:31:24,702 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:31:24,705 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:31:24,706 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:31:24,706 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:31:24,706 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-26T10:31:24,707 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:31:24,707 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73544558{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:31:24,707 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61290c18{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:31:24,821 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@60c41d21{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/java.io.tmpdir/jetty-localhost-39301-hadoop-hdfs-3_4_1-tests_jar-_-any-15029346606968519166/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-26T10:31:24,822 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3637fc70{HTTP/1.1, (http/1.1)}{localhost:39301} 2024-11-26T10:31:24,822 INFO [Time-limited test {}] server.Server(415): Started @316969ms 2024-11-26T10:31:24,834 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-26T10:31:24,884 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:31:24,886 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:31:24,887 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:31:24,887 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:31:24,887 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-26T10:31:24,887 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69a2ae1b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:31:24,888 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2482618b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:31:24,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:24,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:25,000 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d664f93{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/java.io.tmpdir/jetty-localhost-45809-hadoop-hdfs-3_4_1-tests_jar-_-any-6127291166554631490/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:31:25,001 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@96dff09{HTTP/1.1, (http/1.1)}{localhost:45809} 2024-11-26T10:31:25,001 INFO [Time-limited test {}] server.Server(415): Started @317148ms 2024-11-26T10:31:25,002 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-26T10:31:25,031 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:31:25,033 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:31:25,034 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:31:25,034 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:31:25,034 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-26T10:31:25,034 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a8a2fb4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:31:25,035 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@663b7fb1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:31:25,091 WARN [Thread-2486 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/cluster_4d7273ce-eed7-8952-0866-bbd49f72f0b2/data/data1/current/BP-1732810580-172.17.0.2-1732617084652/current, will proceed with Du for space computation calculation, 2024-11-26T10:31:25,092 WARN [Thread-2487 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/cluster_4d7273ce-eed7-8952-0866-bbd49f72f0b2/data/data2/current/BP-1732810580-172.17.0.2-1732617084652/current, will proceed with Du for space computation calculation, 2024-11-26T10:31:25,107 WARN [Thread-2465 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-26T10:31:25,110 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x614b9c1c1d3a9d63 with lease ID 0x8dadb797baa45b9a: Processing first storage report for DS-e797ea62-c11b-43e6-bf31-8dd560cc98f3 from datanode DatanodeRegistration(127.0.0.1:39753, datanodeUuid=9eb222db-2d4c-4144-85f6-dff970774cd5, infoPort=42447, infoSecurePort=0, ipcPort=45919, storageInfo=lv=-57;cid=testClusterID;nsid=262364790;c=1732617084652) 2024-11-26T10:31:25,110 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x614b9c1c1d3a9d63 with lease ID 0x8dadb797baa45b9a: from storage DS-e797ea62-c11b-43e6-bf31-8dd560cc98f3 node DatanodeRegistration(127.0.0.1:39753, datanodeUuid=9eb222db-2d4c-4144-85f6-dff970774cd5, infoPort=42447, infoSecurePort=0, ipcPort=45919, storageInfo=lv=-57;cid=testClusterID;nsid=262364790;c=1732617084652), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:31:25,110 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x614b9c1c1d3a9d63 with lease ID 0x8dadb797baa45b9a: Processing first storage report for DS-944b4dd6-4b18-4a31-845f-902c765d9890 from datanode DatanodeRegistration(127.0.0.1:39753, datanodeUuid=9eb222db-2d4c-4144-85f6-dff970774cd5, infoPort=42447, infoSecurePort=0, ipcPort=45919, storageInfo=lv=-57;cid=testClusterID;nsid=262364790;c=1732617084652) 2024-11-26T10:31:25,110 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x614b9c1c1d3a9d63 with lease ID 0x8dadb797baa45b9a: from storage DS-944b4dd6-4b18-4a31-845f-902c765d9890 node DatanodeRegistration(127.0.0.1:39753, datanodeUuid=9eb222db-2d4c-4144-85f6-dff970774cd5, infoPort=42447, infoSecurePort=0, ipcPort=45919, storageInfo=lv=-57;cid=testClusterID;nsid=262364790;c=1732617084652), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:31:25,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ec9da7e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/java.io.tmpdir/jetty-localhost-42615-hadoop-hdfs-3_4_1-tests_jar-_-any-361354858953289823/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:31:25,151 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7acdff1a{HTTP/1.1, (http/1.1)}{localhost:42615} 2024-11-26T10:31:25,151 INFO [Time-limited test {}] server.Server(415): Started @317298ms 2024-11-26T10:31:25,152 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
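The two RecoverLeaseFSUtils(258) "Failed invocation" warnings a few records earlier are worth unpacking: the lease-recovery helper calls isFileClosed reflectively, so the real failure (java.io.IOException: Filesystem closed, thrown because the underlying DFSClient had already been shut down) arrives wrapped in an InvocationTargetException whose own message is just "null". A rough sketch of such a reflective probe, not the actual HBase implementation, looks like:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative reflective isFileClosed probe, similar in spirit to the warnings above.
    static boolean isFileClosedQuietly(FileSystem fs, Path p) {
      try {
        Method m = fs.getClass().getMethod("isFileClosed", Path.class);
        return (Boolean) m.invoke(fs, p);
      } catch (NoSuchMethodException e) {
        return false;                  // this FileSystem implementation has no such probe
      } catch (InvocationTargetException e) {
        // The interesting exception is the cause, e.g. IOException("Filesystem closed")
        // when the DFSClient behind the WAL path was already closed, as in the log above.
        return false;
      } catch (IllegalAccessException e) {
        return false;
      }
    }

That explains why the log shows "InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed": the reflection layer adds an empty wrapper around the real cause.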
2024-11-26T10:31:25,257 WARN [Thread-2512 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/cluster_4d7273ce-eed7-8952-0866-bbd49f72f0b2/data/data3/current/BP-1732810580-172.17.0.2-1732617084652/current, will proceed with Du for space computation calculation, 2024-11-26T10:31:25,257 WARN [Thread-2513 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/cluster_4d7273ce-eed7-8952-0866-bbd49f72f0b2/data/data4/current/BP-1732810580-172.17.0.2-1732617084652/current, will proceed with Du for space computation calculation, 2024-11-26T10:31:25,273 WARN [Thread-2501 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-26T10:31:25,275 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x70ddc529e1c8f390 with lease ID 0x8dadb797baa45b9b: Processing first storage report for DS-0fa448f2-1651-47a9-8072-aba089256c84 from datanode DatanodeRegistration(127.0.0.1:36573, datanodeUuid=97537d47-5a8b-48ac-adeb-882f12072389, infoPort=42523, infoSecurePort=0, ipcPort=36693, storageInfo=lv=-57;cid=testClusterID;nsid=262364790;c=1732617084652) 2024-11-26T10:31:25,275 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x70ddc529e1c8f390 with lease ID 0x8dadb797baa45b9b: from storage DS-0fa448f2-1651-47a9-8072-aba089256c84 node DatanodeRegistration(127.0.0.1:36573, datanodeUuid=97537d47-5a8b-48ac-adeb-882f12072389, infoPort=42523, infoSecurePort=0, ipcPort=36693, storageInfo=lv=-57;cid=testClusterID;nsid=262364790;c=1732617084652), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-26T10:31:25,275 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x70ddc529e1c8f390 with lease ID 0x8dadb797baa45b9b: Processing first storage report for DS-77c380a4-1d66-49c7-9a97-6beb431c9256 from datanode DatanodeRegistration(127.0.0.1:36573, datanodeUuid=97537d47-5a8b-48ac-adeb-882f12072389, infoPort=42523, infoSecurePort=0, ipcPort=36693, storageInfo=lv=-57;cid=testClusterID;nsid=262364790;c=1732617084652) 2024-11-26T10:31:25,275 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x70ddc529e1c8f390 with lease ID 0x8dadb797baa45b9b: from storage DS-77c380a4-1d66-49c7-9a97-6beb431c9256 node DatanodeRegistration(127.0.0.1:36573, datanodeUuid=97537d47-5a8b-48ac-adeb-882f12072389, infoPort=42523, infoSecurePort=0, ipcPort=36693, storageInfo=lv=-57;cid=testClusterID;nsid=262364790;c=1732617084652), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:31:25,373 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13 2024-11-26T10:31:25,376 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/cluster_4d7273ce-eed7-8952-0866-bbd49f72f0b2/zookeeper_0, clientPort=59978, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/cluster_4d7273ce-eed7-8952-0866-bbd49f72f0b2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/cluster_4d7273ce-eed7-8952-0866-bbd49f72f0b2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-26T10:31:25,376 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59978 2024-11-26T10:31:25,377 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:31:25,378 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:31:25,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36573 is added to blk_1073741825_1001 (size=7) 2024-11-26T10:31:25,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39753 is added to blk_1073741825_1001 (size=7) 2024-11-26T10:31:25,386 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad with version=8 2024-11-26T10:31:25,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33735/user/jenkins/test-data/27220d29-02cd-fc75-47ee-850c281306db/hbase-staging 2024-11-26T10:31:25,388 INFO [Time-limited test {}] client.ConnectionUtils(128): master/94eedbb855cf:0 server-side Connection retries=45 2024-11-26T10:31:25,388 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:31:25,388 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-26T10:31:25,389 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-26T10:31:25,389 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:31:25,389 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-26T10:31:25,389 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-26T10:31:25,389 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-26T10:31:25,389 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41949 2024-11-26T10:31:25,390 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41949 connecting to ZooKeeper ensemble=127.0.0.1:59978 2024-11-26T10:31:25,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:419490x0, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-26T10:31:25,398 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41949-0x10153d48c4f0000 connected 2024-11-26T10:31:25,414 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:31:25,415 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:31:25,417 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:31:25,417 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad, hbase.cluster.distributed=false 2024-11-26T10:31:25,418 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-26T10:31:25,418 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41949 2024-11-26T10:31:25,419 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41949 2024-11-26T10:31:25,419 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41949 2024-11-26T10:31:25,419 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41949 2024-11-26T10:31:25,419 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41949 2024-11-26T10:31:25,434 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/94eedbb855cf:0 server-side Connection retries=45 2024-11-26T10:31:25,434 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:31:25,434 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-26T10:31:25,434 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-26T10:31:25,434 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:31:25,434 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-26T10:31:25,434 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-26T10:31:25,434 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-26T10:31:25,435 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41713 2024-11-26T10:31:25,435 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41713 connecting to ZooKeeper ensemble=127.0.0.1:59978 2024-11-26T10:31:25,436 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:31:25,437 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:31:25,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:417130x0, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-26T10:31:25,441 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:417130x0, quorum=127.0.0.1:59978, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:31:25,441 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41713-0x10153d48c4f0001 connected 2024-11-26T10:31:25,442 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-26T10:31:25,442 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-26T10:31:25,443 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-26T10:31:25,443 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-26T10:31:25,446 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41713 2024-11-26T10:31:25,446 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41713 2024-11-26T10:31:25,446 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41713 2024-11-26T10:31:25,447 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41713 2024-11-26T10:31:25,447 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41713 2024-11-26T10:31:25,458 
DEBUG [M:0;94eedbb855cf:41949 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;94eedbb855cf:41949 2024-11-26T10:31:25,459 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/94eedbb855cf,41949,1732617085388 2024-11-26T10:31:25,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:31:25,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:31:25,460 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/94eedbb855cf,41949,1732617085388 2024-11-26T10:31:25,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-26T10:31:25,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:31:25,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:31:25,462 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-26T10:31:25,462 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/94eedbb855cf,41949,1732617085388 from backup master directory 2024-11-26T10:31:25,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/94eedbb855cf,41949,1732617085388 2024-11-26T10:31:25,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:31:25,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:31:25,470 WARN [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
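The ipc.RpcExecutor lines above ("Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3") describe bounded call queues drained by fixed pools of handler threads, one set bound to the master on port 41949 and one to the region server on port 41713. A toy, JDK-only model of a single such executor (not HBase's RpcExecutor, just an illustration of what those three numbers mean) might look like:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    // Toy model of one executor from the log: queueClass=LinkedBlockingQueue,
    // numCallQueues=1, maxQueueLength=30, handlerCount=3. Purely illustrative.
    final class ToyFifoExecutor {
      private final BlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>(30); // maxQueueLength

      ToyFifoExecutor() {
        for (int i = 0; i < 3; i++) {                      // handlerCount handler threads
          Thread handler = new Thread(() -> {
            try {
              while (true) {
                callQueue.take().run();                    // each handler runs one queued call at a time
              }
            } catch (InterruptedException ie) {
              Thread.currentThread().interrupt();          // stop quietly on shutdown
            }
          }, "toy.default.FPBQ.Fifo.handler=" + i);
          handler.setDaemon(true);
          handler.start();
        }
      }

      boolean dispatch(Runnable call) {
        // A full queue means the call is rejected; the real server surfaces that to the
        // client as back-pressure rather than blocking the dispatcher.
        return callQueue.offer(call);
      }
    }

In the log, maxQueueLength=30 is simply 10 slots per handler for the 3 handlers each executor was given.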
2024-11-26T10:31:25,470 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=94eedbb855cf,41949,1732617085388 2024-11-26T10:31:25,473 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/hbase.id] with ID: 9f6ea6a6-2165-4ae1-9944-aa9787bae552 2024-11-26T10:31:25,473 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/.tmp/hbase.id 2024-11-26T10:31:25,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39753 is added to blk_1073741826_1002 (size=42) 2024-11-26T10:31:25,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36573 is added to blk_1073741826_1002 (size=42) 2024-11-26T10:31:25,479 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/.tmp/hbase.id]:[hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/hbase.id] 2024-11-26T10:31:25,488 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:31:25,488 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-26T10:31:25,490 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
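The cluster ID sequence above (write hbase.id to a .tmp location, then move it to its final name) is the usual write-then-rename trick so the file only ever appears fully written. A sketch of that pattern with the plain Hadoop FileSystem API, using illustrative paths rather than the FSUtils code itself, could be:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch of the "write to .tmp, then rename into place" pattern used for hbase.id above.
    static void writeClusterId(Configuration conf, Path rootDir, String clusterId) throws Exception {
      FileSystem fs = rootDir.getFileSystem(conf);
      Path tmp = new Path(rootDir, ".tmp/hbase.id");
      Path dst = new Path(rootDir, "hbase.id");
      try (FSDataOutputStream out = fs.create(tmp, true)) {  // write the id to the temp file first
        out.write(clusterId.getBytes(StandardCharsets.UTF_8));
      }
      if (!fs.rename(tmp, dst)) {                            // then move it to its target location
        throw new java.io.IOException("rename " + tmp + " -> " + dst + " failed");
      }
    }

Readers either see no hbase.id at all or a complete one, which is exactly what the "Write the cluster ID file to a temporary location" / "Move the temporary cluster ID file to its target location" pair in the log is doing.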
2024-11-26T10:31:25,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:31:25,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:31:25,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39753 is added to blk_1073741827_1003 (size=196) 2024-11-26T10:31:25,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36573 is added to blk_1073741827_1003 (size=196) 2024-11-26T10:31:25,497 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:31:25,498 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-26T10:31:25,498 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:31:25,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39753 is added to blk_1073741828_1004 (size=1189) 2024-11-26T10:31:25,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36573 is added to blk_1073741828_1004 (size=1189) 2024-11-26T10:31:25,505 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store 2024-11-26T10:31:25,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39753 is added to blk_1073741829_1005 (size=34) 2024-11-26T10:31:25,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36573 is added to blk_1073741829_1005 (size=34) 2024-11-26T10:31:25,511 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:31:25,511 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-26T10:31:25,511 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:31:25,511 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:31:25,511 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-26T10:31:25,511 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:31:25,511 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
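The 'master:store' descriptor dumped above carries the same information the public client API expresses with TableDescriptorBuilder and ColumnFamilyDescriptorBuilder. As an illustration only, the 'info' family from the log (VERSIONS 3, IN_MEMORY true, 8 KB blocks, ROW_INDEX_V1 encoding, ROWCOL bloom filter) maps to roughly:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative reconstruction of the 'info' family of the descriptor logged above.
    static TableDescriptor masterStoreLikeDescriptor() {
      return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(3)                                   // VERSIONS => '3'
              .setInMemory(true)                                   // IN_MEMORY => 'true'
              .setBlocksize(8192)                                  // BLOCKSIZE => '8192 B (8KB)'
              .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
              .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
              .build())
          .build();
    }

The 'proc', 'rs' and 'state' families in the log differ only in their versions, block size, encoding and bloom filter settings and would be added the same way.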
2024-11-26T10:31:25,511 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732617085511Disabling compacts and flushes for region at 1732617085511Disabling writes for close at 1732617085511Writing region close event to WAL at 1732617085511Closed at 1732617085511 2024-11-26T10:31:25,512 WARN [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/.initializing 2024-11-26T10:31:25,512 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/WALs/94eedbb855cf,41949,1732617085388 2024-11-26T10:31:25,514 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C41949%2C1732617085388, suffix=, logDir=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/WALs/94eedbb855cf,41949,1732617085388, archiveDir=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/oldWALs, maxLogs=10 2024-11-26T10:31:25,514 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C41949%2C1732617085388.1732617085514 2024-11-26T10:31:25,518 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/WALs/94eedbb855cf,41949,1732617085388/94eedbb855cf%2C41949%2C1732617085388.1732617085514 2024-11-26T10:31:25,519 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42523:42523),(127.0.0.1/127.0.0.1:42447:42447)] 2024-11-26T10:31:25,520 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:31:25,520 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:31:25,520 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:31:25,520 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:31:25,521 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:31:25,522 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-26T10:31:25,522 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:31:25,523 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:31:25,523 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:31:25,524 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-26T10:31:25,524 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:31:25,524 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:31:25,524 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:31:25,525 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-26T10:31:25,525 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:31:25,526 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:31:25,526 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:31:25,527 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-26T10:31:25,527 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:31:25,527 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:31:25,527 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:31:25,528 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:31:25,528 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:31:25,529 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:31:25,529 DEBUG [master/94eedbb855cf:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:31:25,529 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-26T10:31:25,530 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:31:25,532 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:31:25,532 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=780758, jitterRate=-0.007215604186058044}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-26T10:31:25,533 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732617085520Initializing all the Stores at 1732617085521 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732617085521Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732617085521Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732617085521Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732617085521Cleaning up temporary data from old regions at 1732617085529 (+8 ms)Region opened successfully at 1732617085533 (+4 ms) 2024-11-26T10:31:25,533 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-26T10:31:25,536 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e087b8d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=94eedbb855cf/172.17.0.2:0 2024-11-26T10:31:25,537 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-26T10:31:25,537 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-26T10:31:25,537 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-26T10:31:25,537 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-26T10:31:25,538 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-26T10:31:25,538 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-26T10:31:25,538 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-26T10:31:25,540 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-26T10:31:25,541 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-26T10:31:25,542 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-26T10:31:25,542 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-26T10:31:25,543 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-26T10:31:25,544 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-26T10:31:25,544 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-26T10:31:25,545 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-26T10:31:25,548 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-26T10:31:25,549 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-26T10:31:25,551 DEBUG 
[master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-26T10:31:25,552 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-26T10:31:25,553 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-26T10:31:25,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-26T10:31:25,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-26T10:31:25,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:31:25,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:31:25,556 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=94eedbb855cf,41949,1732617085388, sessionid=0x10153d48c4f0000, setting cluster-up flag (Was=false) 2024-11-26T10:31:25,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:31:25,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:31:25,566 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-26T10:31:25,566 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=94eedbb855cf,41949,1732617085388 2024-11-26T10:31:25,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:31:25,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:31:25,575 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-26T10:31:25,575 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=94eedbb855cf,41949,1732617085388 2024-11-26T10:31:25,576 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-26T10:31:25,578 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-26T10:31:25,578 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-26T10:31:25,578 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-26T10:31:25,578 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 94eedbb855cf,41949,1732617085388 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-26T10:31:25,579 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:31:25,579 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:31:25,579 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:31:25,579 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/94eedbb855cf:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:31:25,579 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/94eedbb855cf:0, corePoolSize=10, maxPoolSize=10 2024-11-26T10:31:25,579 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:31:25,579 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/94eedbb855cf:0, corePoolSize=2, maxPoolSize=2 2024-11-26T10:31:25,579 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/94eedbb855cf:0, corePoolSize=1, 
maxPoolSize=1 2024-11-26T10:31:25,580 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732617115580 2024-11-26T10:31:25,580 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-26T10:31:25,580 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-26T10:31:25,580 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-26T10:31:25,580 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-26T10:31:25,581 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-26T10:31:25,581 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-26T10:31:25,581 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:25,581 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-26T10:31:25,581 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:31:25,581 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-26T10:31:25,581 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-26T10:31:25,581 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-26T10:31:25,582 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-26T10:31:25,582 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-26T10:31:25,582 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732617085582,5,FailOnTimeoutGroup] 2024-11-26T10:31:25,582 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732617085582,5,FailOnTimeoutGroup] 2024-11-26T10:31:25,582 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:25,582 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-26T10:31:25,582 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:25,582 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:25,582 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:31:25,582 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-26T10:31:25,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39753 is added to blk_1073741831_1007 (size=1321) 2024-11-26T10:31:25,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36573 is added to blk_1073741831_1007 (size=1321) 2024-11-26T10:31:25,589 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-26T10:31:25,589 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad 2024-11-26T10:31:25,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39753 is added to blk_1073741832_1008 (size=32) 2024-11-26T10:31:25,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36573 is added to blk_1073741832_1008 (size=32) 2024-11-26T10:31:25,596 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:31:25,597 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-26T10:31:25,598 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-26T10:31:25,598 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:31:25,598 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:31:25,598 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-26T10:31:25,599 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-26T10:31:25,599 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:31:25,599 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:31:25,600 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-26T10:31:25,600 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-26T10:31:25,600 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:31:25,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:31:25,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-26T10:31:25,602 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-26T10:31:25,602 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:31:25,602 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:31:25,602 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-26T10:31:25,603 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/data/hbase/meta/1588230740 2024-11-26T10:31:25,603 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/data/hbase/meta/1588230740 2024-11-26T10:31:25,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-26T10:31:25,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-26T10:31:25,604 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
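
[Editor's note] The FlushLargeStoresPolicy fallback seen here (and earlier for 'master:store') is plain division: region memstore flush size divided by the number of column families. A tiny sketch of that arithmetic; the figures come from the log, the 64 MB meta flush size is inferred rather than printed, and the class name is the editor's:

public final class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // master:store: logged flushSize=134217728 (128 MB) and 4 families
    // (info, proc, rs, state) -> 32 MB, matching
    // FlushLargeStoresPolicy{flushSizeLowerBound=33554432} earlier in the log.
    long masterStoreFlushSize = 134_217_728L;
    System.out.println(masterStoreFlushSize / 4);   // 33554432

    // hbase:meta: the "16.0 M" fallback just above with 4 families
    // (info, ns, rep_barrier, table) implies a 64 MB meta flush size,
    // which is not printed explicitly in the log.
    long metaLowerBound = 16_777_216L;
    System.out.println(metaLowerBound * 4);         // 67108864 (64 MB), inferred
  }
}
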
2024-11-26T10:31:25,605 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-26T10:31:25,607 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:31:25,607 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=815632, jitterRate=0.03713051974773407}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:31:25,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732617085596Initializing all the Stores at 1732617085596Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732617085596Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732617085596Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732617085596Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732617085596Cleaning up temporary data from old regions at 1732617085604 (+8 ms)Region opened successfully at 1732617085608 (+4 ms) 2024-11-26T10:31:25,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-26T10:31:25,608 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-26T10:31:25,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-26T10:31:25,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-26T10:31:25,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-26T10:31:25,608 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-26T10:31:25,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732617085608Disabling compacts and flushes for region at 1732617085608Disabling writes for close at 1732617085608Writing region close event to 
WAL at 1732617085608Closed at 1732617085608 2024-11-26T10:31:25,609 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:31:25,609 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-26T10:31:25,609 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-26T10:31:25,611 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-26T10:31:25,612 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-26T10:31:25,649 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.HRegionServer(746): ClusterId : 9f6ea6a6-2165-4ae1-9944-aa9787bae552 2024-11-26T10:31:25,649 DEBUG [RS:0;94eedbb855cf:41713 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-26T10:31:25,650 DEBUG [RS:0;94eedbb855cf:41713 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-26T10:31:25,651 DEBUG [RS:0;94eedbb855cf:41713 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-26T10:31:25,652 DEBUG [RS:0;94eedbb855cf:41713 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-26T10:31:25,652 DEBUG [RS:0;94eedbb855cf:41713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75a6fda8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=94eedbb855cf/172.17.0.2:0 2024-11-26T10:31:25,664 DEBUG [RS:0;94eedbb855cf:41713 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;94eedbb855cf:41713 2024-11-26T10:31:25,664 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-26T10:31:25,664 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-26T10:31:25,664 DEBUG [RS:0;94eedbb855cf:41713 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-26T10:31:25,665 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.HRegionServer(2659): reportForDuty to master=94eedbb855cf,41949,1732617085388 with port=41713, startcode=1732617085433 2024-11-26T10:31:25,665 DEBUG [RS:0;94eedbb855cf:41713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-26T10:31:25,667 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52319, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-26T10:31:25,668 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41949 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 94eedbb855cf,41713,1732617085433 2024-11-26T10:31:25,668 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41949 {}] master.ServerManager(517): Registering regionserver=94eedbb855cf,41713,1732617085433 2024-11-26T10:31:25,669 DEBUG [RS:0;94eedbb855cf:41713 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad 2024-11-26T10:31:25,669 DEBUG [RS:0;94eedbb855cf:41713 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:32955 2024-11-26T10:31:25,669 DEBUG [RS:0;94eedbb855cf:41713 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-26T10:31:25,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-26T10:31:25,673 DEBUG [RS:0;94eedbb855cf:41713 {}] zookeeper.ZKUtil(111): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/94eedbb855cf,41713,1732617085433 2024-11-26T10:31:25,673 WARN [RS:0;94eedbb855cf:41713 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-26T10:31:25,673 INFO [RS:0;94eedbb855cf:41713 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:31:25,673 DEBUG [RS:0;94eedbb855cf:41713 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/WALs/94eedbb855cf,41713,1732617085433 2024-11-26T10:31:25,673 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [94eedbb855cf,41713,1732617085433] 2024-11-26T10:31:25,676 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-26T10:31:25,677 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-26T10:31:25,678 INFO [RS:0;94eedbb855cf:41713 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-26T10:31:25,678 INFO [RS:0;94eedbb855cf:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-26T10:31:25,678 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-26T10:31:25,678 INFO [RS:0;94eedbb855cf:41713 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-26T10:31:25,678 INFO [RS:0;94eedbb855cf:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:25,679 DEBUG [RS:0;94eedbb855cf:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:31:25,679 DEBUG [RS:0;94eedbb855cf:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:31:25,679 DEBUG [RS:0;94eedbb855cf:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:31:25,679 DEBUG [RS:0;94eedbb855cf:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:31:25,679 DEBUG [RS:0;94eedbb855cf:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:31:25,679 DEBUG [RS:0;94eedbb855cf:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/94eedbb855cf:0, corePoolSize=2, maxPoolSize=2 2024-11-26T10:31:25,679 DEBUG [RS:0;94eedbb855cf:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:31:25,679 DEBUG [RS:0;94eedbb855cf:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:31:25,679 DEBUG [RS:0;94eedbb855cf:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:31:25,679 DEBUG [RS:0;94eedbb855cf:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:31:25,679 DEBUG [RS:0;94eedbb855cf:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:31:25,679 DEBUG [RS:0;94eedbb855cf:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/94eedbb855cf:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:31:25,679 DEBUG [RS:0;94eedbb855cf:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/94eedbb855cf:0, corePoolSize=3, maxPoolSize=3 2024-11-26T10:31:25,679 DEBUG [RS:0;94eedbb855cf:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/94eedbb855cf:0, corePoolSize=3, maxPoolSize=3 2024-11-26T10:31:25,679 INFO [RS:0;94eedbb855cf:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-26T10:31:25,679 INFO [RS:0;94eedbb855cf:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:25,679 INFO [RS:0;94eedbb855cf:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:25,679 INFO [RS:0;94eedbb855cf:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:25,679 INFO [RS:0;94eedbb855cf:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:25,679 INFO [RS:0;94eedbb855cf:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,41713,1732617085433-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-26T10:31:25,693 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-26T10:31:25,694 INFO [RS:0;94eedbb855cf:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,41713,1732617085433-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:25,694 INFO [RS:0;94eedbb855cf:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:25,694 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.Replication(171): 94eedbb855cf,41713,1732617085433 started 2024-11-26T10:31:25,707 INFO [RS:0;94eedbb855cf:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:25,707 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.HRegionServer(1482): Serving as 94eedbb855cf,41713,1732617085433, RpcServer on 94eedbb855cf/172.17.0.2:41713, sessionid=0x10153d48c4f0001 2024-11-26T10:31:25,707 DEBUG [RS:0;94eedbb855cf:41713 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-26T10:31:25,707 DEBUG [RS:0;94eedbb855cf:41713 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 94eedbb855cf,41713,1732617085433 2024-11-26T10:31:25,707 DEBUG [RS:0;94eedbb855cf:41713 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '94eedbb855cf,41713,1732617085433' 2024-11-26T10:31:25,707 DEBUG [RS:0;94eedbb855cf:41713 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-26T10:31:25,707 DEBUG [RS:0;94eedbb855cf:41713 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-26T10:31:25,708 DEBUG [RS:0;94eedbb855cf:41713 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-26T10:31:25,708 DEBUG [RS:0;94eedbb855cf:41713 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-26T10:31:25,708 DEBUG [RS:0;94eedbb855cf:41713 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 94eedbb855cf,41713,1732617085433 2024-11-26T10:31:25,708 DEBUG [RS:0;94eedbb855cf:41713 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '94eedbb855cf,41713,1732617085433' 2024-11-26T10:31:25,708 DEBUG [RS:0;94eedbb855cf:41713 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-26T10:31:25,708 DEBUG 
[RS:0;94eedbb855cf:41713 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-26T10:31:25,708 DEBUG [RS:0;94eedbb855cf:41713 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-26T10:31:25,708 INFO [RS:0;94eedbb855cf:41713 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-26T10:31:25,708 INFO [RS:0;94eedbb855cf:41713 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-26T10:31:25,762 WARN [94eedbb855cf:41949 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-26T10:31:25,810 INFO [RS:0;94eedbb855cf:41713 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C41713%2C1732617085433, suffix=, logDir=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/WALs/94eedbb855cf,41713,1732617085433, archiveDir=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/oldWALs, maxLogs=32 2024-11-26T10:31:25,811 INFO [RS:0;94eedbb855cf:41713 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C41713%2C1732617085433.1732617085810 2024-11-26T10:31:25,816 INFO [RS:0;94eedbb855cf:41713 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/WALs/94eedbb855cf,41713,1732617085433/94eedbb855cf%2C41713%2C1732617085433.1732617085810 2024-11-26T10:31:25,817 DEBUG [RS:0;94eedbb855cf:41713 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42447:42447),(127.0.0.1/127.0.0.1:42523:42523)] 2024-11-26T10:31:25,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,39711,1732616873796/94eedbb855cf%2C39711%2C1732616873796.1732616874000 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-26T10:31:25,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43805/user/jenkins/test-data/232f00b7-a689-6e3f-b099-f7bab65710bf/WALs/94eedbb855cf,35079,1732616872640/94eedbb855cf%2C35079%2C1732616872640.meta.1732616873623.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-26T10:31:26,012 DEBUG [94eedbb855cf:41949 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-26T10:31:26,013 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=94eedbb855cf,41713,1732617085433 2024-11-26T10:31:26,014 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 94eedbb855cf,41713,1732617085433, state=OPENING 2024-11-26T10:31:26,016 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-26T10:31:26,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:31:26,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:31:26,019 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:31:26,019 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:31:26,019 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-26T10:31:26,019 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=94eedbb855cf,41713,1732617085433}] 2024-11-26T10:31:26,172 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-26T10:31:26,174 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41023, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-26T10:31:26,177 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-26T10:31:26,177 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:31:26,179 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=94eedbb855cf%2C41713%2C1732617085433.meta, suffix=.meta, logDir=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/WALs/94eedbb855cf,41713,1732617085433, archiveDir=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/oldWALs, maxLogs=32 2024-11-26T10:31:26,179 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 94eedbb855cf%2C41713%2C1732617085433.meta.1732617086179.meta 2024-11-26T10:31:26,185 INFO 
[RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/WALs/94eedbb855cf,41713,1732617085433/94eedbb855cf%2C41713%2C1732617085433.meta.1732617086179.meta 2024-11-26T10:31:26,187 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42523:42523),(127.0.0.1/127.0.0.1:42447:42447)] 2024-11-26T10:31:26,188 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:31:26,188 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-26T10:31:26,188 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-26T10:31:26,188 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-26T10:31:26,188 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-26T10:31:26,188 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:31:26,188 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-26T10:31:26,188 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-26T10:31:26,189 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-26T10:31:26,190 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-26T10:31:26,190 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:31:26,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:31:26,191 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-26T10:31:26,191 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-26T10:31:26,191 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:31:26,192 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:31:26,192 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-26T10:31:26,192 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-26T10:31:26,192 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:31:26,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:31:26,193 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-26T10:31:26,193 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-26T10:31:26,193 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:31:26,194 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:31:26,194 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-26T10:31:26,194 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/data/hbase/meta/1588230740 2024-11-26T10:31:26,195 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/data/hbase/meta/1588230740 2024-11-26T10:31:26,196 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-26T10:31:26,196 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-26T10:31:26,197 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
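The CompactionConfiguration and FlushLargeStoresPolicy entries above (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, and the "No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor" fallback) map to standard HBase configuration keys. The following is only an illustrative sketch: the keys are the usual ones, but the values simply mirror the defaults shown in the log, and setting the flush lower bound on the Configuration here is purely to show the key name (the log indicates it is normally looked up in the table descriptor).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch of the keys behind the CompactionConfiguration /
// FlushLargeStoresPolicy values logged above; numbers mirror the logged defaults.
public class CompactionFlushConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // compaction ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
    // Shown only to illustrate the key named in the log; in practice the log suggests
    // this value is read from the table descriptor rather than the site configuration.
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
  }
}
```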
2024-11-26T10:31:26,198 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-26T10:31:26,198 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=832095, jitterRate=0.05806373059749603}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:31:26,198 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-26T10:31:26,199 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732617086188Writing region info on filesystem at 1732617086188Initializing all the Stores at 1732617086189 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732617086189Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732617086189Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732617086189Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732617086189Cleaning up temporary data from old regions at 1732617086196 (+7 ms)Running coprocessor post-open hooks at 1732617086198 (+2 ms)Region opened successfully at 1732617086199 (+1 ms) 2024-11-26T10:31:26,200 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732617086172 2024-11-26T10:31:26,202 DEBUG [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-26T10:31:26,202 INFO [RS_OPEN_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-26T10:31:26,202 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=94eedbb855cf,41713,1732617085433 2024-11-26T10:31:26,203 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 94eedbb855cf,41713,1732617085433, state=OPEN 2024-11-26T10:31:26,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-26T10:31:26,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-26T10:31:26,211 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=94eedbb855cf,41713,1732617085433 2024-11-26T10:31:26,211 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:31:26,211 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:31:26,213 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-26T10:31:26,213 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=94eedbb855cf,41713,1732617085433 in 192 msec 2024-11-26T10:31:26,215 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-26T10:31:26,215 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 604 msec 2024-11-26T10:31:26,216 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:31:26,216 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-26T10:31:26,217 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-26T10:31:26,217 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=94eedbb855cf,41713,1732617085433, seqNum=-1] 2024-11-26T10:31:26,218 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:31:26,219 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58369, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:31:26,224 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 645 msec 2024-11-26T10:31:26,224 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732617086224, completionTime=-1 2024-11-26T10:31:26,224 INFO 
[master/94eedbb855cf:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-26T10:31:26,224 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-26T10:31:26,226 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-26T10:31:26,226 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732617146226 2024-11-26T10:31:26,226 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732617206226 2024-11-26T10:31:26,226 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-26T10:31:26,226 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,41949,1732617085388-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:26,226 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,41949,1732617085388-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:26,226 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,41949,1732617085388-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:26,226 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-94eedbb855cf:41949, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:26,226 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:26,227 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:26,228 DEBUG [master/94eedbb855cf:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-26T10:31:26,230 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.760sec 2024-11-26T10:31:26,230 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-26T10:31:26,230 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-26T10:31:26,230 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-26T10:31:26,230 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
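The repeated "Chore ScheduledChore name=..., period=..., unit=... is enabled" entries above and earlier come from ChoreService scheduling periodic tasks. Below is a minimal sketch of a custom chore using the public ScheduledChore/ChoreService API; the chore name, period, Stoppable stub, and the exact constructor overload are assumptions for illustration, not taken from the log.

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Hypothetical chore; name, period and the Stoppable stub are illustrative.
public class ExampleChore extends ScheduledChore {

  ExampleChore(Stoppable stopper) {
    // name, stopper, period, initial delay, unit -- mirrors the "period=..., unit=..." fields above
    super("ExampleChore", stopper, 1000, 0, TimeUnit.MILLISECONDS);
  }

  @Override
  protected void chore() {
    // periodic work goes here (the real chores above flush memstores, clean old WALs, etc.)
    System.out.println("chore tick");
  }

  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("example");
    // Scheduling is what produces the ChoreService "... is enabled." entries seen above.
    service.scheduleChore(new ExampleChore(stopper));
    Thread.sleep(3_000);
    stopper.stop("done");
    service.shutdown();
  }
}
```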
2024-11-26T10:31:26,230 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-26T10:31:26,230 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,41949,1732617085388-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-26T10:31:26,230 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,41949,1732617085388-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-26T10:31:26,232 DEBUG [master/94eedbb855cf:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-26T10:31:26,232 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-26T10:31:26,232 INFO [master/94eedbb855cf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=94eedbb855cf,41949,1732617085388-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:31:26,249 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e66231a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:31:26,249 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 94eedbb855cf,41949,-1 for getting cluster id 2024-11-26T10:31:26,249 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-26T10:31:26,250 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9f6ea6a6-2165-4ae1-9944-aa9787bae552' 2024-11-26T10:31:26,251 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-26T10:31:26,251 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9f6ea6a6-2165-4ae1-9944-aa9787bae552" 2024-11-26T10:31:26,251 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1be746b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:31:26,251 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [94eedbb855cf,41949,-1] 2024-11-26T10:31:26,251 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-26T10:31:26,251 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:31:26,252 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33654, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-26T10:31:26,253 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30d018ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:31:26,253 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-26T10:31:26,254 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=94eedbb855cf,41713,1732617085433, seqNum=-1] 2024-11-26T10:31:26,254 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:31:26,255 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39930, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:31:26,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=94eedbb855cf,41949,1732617085388 2024-11-26T10:31:26,257 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:31:26,259 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-26T10:31:26,259 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-26T10:31:26,261 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/WALs/test.com,8080,1, archiveDir=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/oldWALs, maxLogs=32 2024-11-26T10:31:26,261 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732617086261 2024-11-26T10:31:26,266 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/WALs/test.com,8080,1/test.com%2C8080%2C1.1732617086261 2024-11-26T10:31:26,279 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42523:42523),(127.0.0.1/127.0.0.1:42447:42447)] 2024-11-26T10:31:26,281 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732617086280 2024-11-26T10:31:26,285 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,285 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,285 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,285 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,285 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,285 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/WALs/test.com,8080,1/test.com%2C8080%2C1.1732617086261 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/WALs/test.com,8080,1/test.com%2C8080%2C1.1732617086280 2024-11-26T10:31:26,286 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42447:42447),(127.0.0.1/127.0.0.1:42523:42523)] 2024-11-26T10:31:26,286 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/WALs/test.com,8080,1/test.com%2C8080%2C1.1732617086261 is not closed yet, will try archiving it next time 2024-11-26T10:31:26,286 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,287 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39753 is added to blk_1073741835_1011 (size=93) 2024-11-26T10:31:26,287 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,287 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,287 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36573 is added to blk_1073741835_1011 (size=93) 2024-11-26T10:31:26,288 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/WALs/test.com,8080,1/test.com%2C8080%2C1.1732617086261 to hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/oldWALs/test.com%2C8080%2C1.1732617086261 2024-11-26T10:31:26,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36573 is added to blk_1073741836_1012 (size=93) 2024-11-26T10:31:26,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39753 is added to blk_1073741836_1012 (size=93) 2024-11-26T10:31:26,291 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/oldWALs 2024-11-26T10:31:26,291 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732617086280) 2024-11-26T10:31:26,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-26T10:31:26,291 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
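The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" entries and the roll/archive sequence above are governed by a small set of settings. A minimal sketch follows, assuming the standard configuration keys; the relation rollsize = blocksize x logroll multiplier is inferred from the logged 256 MB / 128 MB pair rather than stated in the log, and the values are examples only.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the knobs behind "blocksize=256 MB, rollsize=128 MB, ... maxLogs=32" above.
public class WalRollConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at this fraction of blocksize
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // matches maxLogs=32 above
    conf.setLong("hbase.regionserver.logroll.period", 3_600_000L);         // time-based roll, 1 hour
  }
}
```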
2024-11-26T10:31:26,291 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:31:26,291 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:31:26,291 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:31:26,291 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
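The call stack above runs AbstractTestLogRolling.tearDown into HBaseTestingUtil.shutdownMiniCluster. The skeleton below sketches that test lifecycle; it is illustrative only and not the actual AbstractTestLogRolling class.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

// Skeleton of the lifecycle visible in the call stack above (tearDown -> shutdownMiniCluster).
public class ExampleMiniClusterTest {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    util.startMiniCluster();    // brings up the cluster; see the "Minicluster is up" entry earlier
  }

  @After
  public void tearDown() throws Exception {
    util.shutdownMiniCluster(); // produces the "Shutting down minicluster" entries above
  }
}
```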
2024-11-26T10:31:26,291 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-26T10:31:26,291 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=766421391, stopped=false 2024-11-26T10:31:26,291 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=94eedbb855cf,41949,1732617085388 2024-11-26T10:31:26,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-26T10:31:26,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-26T10:31:26,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:31:26,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:31:26,293 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-26T10:31:26,293 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-26T10:31:26,293 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:31:26,293 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:31:26,294 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:31:26,294 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '94eedbb855cf,41713,1732617085433' ***** 2024-11-26T10:31:26,294 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-26T10:31:26,294 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:31:26,294 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-26T10:31:26,294 INFO [RS:0;94eedbb855cf:41713 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-26T10:31:26,294 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-26T10:31:26,294 INFO [RS:0;94eedbb855cf:41713 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-26T10:31:26,294 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.HRegionServer(959): stopping server 94eedbb855cf,41713,1732617085433 2024-11-26T10:31:26,294 INFO [RS:0;94eedbb855cf:41713 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-26T10:31:26,294 INFO [RS:0;94eedbb855cf:41713 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;94eedbb855cf:41713. 
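The ZKWatcher entries above (NodeDeleted on /hbase/running to signal shutdown, followed by "Set watcher on znode that does not yet exist") follow the plain ZooKeeper one-shot watch model. Below is a minimal sketch using the stock ZooKeeper client; the quorum address and sleep are illustrative, and this is not HBase's ZKWatcher implementation.

```java
import java.io.IOException;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Sketch of the watch pattern behind the entries above: a one-shot watch on /hbase/running
// fires NodeDeleted when shutdown is requested.
public class RunningNodeWatchSketch {
  public static void main(String[] args) throws IOException, KeeperException, InterruptedException {
    Watcher watcher = event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        System.out.println("cluster shutdown signalled: /hbase/running deleted");
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher);
    // exists(path, true) registers a watch even when the znode is absent -- the
    // "Set watcher on znode that does not yet exist" pattern seen in the log.
    zk.exists("/hbase/running", true);
    Thread.sleep(60_000);
    zk.close();
  }
}
```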
2024-11-26T10:31:26,294 DEBUG [RS:0;94eedbb855cf:41713 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:31:26,294 DEBUG [RS:0;94eedbb855cf:41713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:31:26,294 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-26T10:31:26,294 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-26T10:31:26,294 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-26T10:31:26,295 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-26T10:31:26,295 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-26T10:31:26,295 DEBUG [RS:0;94eedbb855cf:41713 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-26T10:31:26,295 DEBUG [RS:0;94eedbb855cf:41713 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-26T10:31:26,295 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-26T10:31:26,295 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-26T10:31:26,295 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-26T10:31:26,295 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-26T10:31:26,295 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-26T10:31:26,295 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-26T10:31:26,311 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/data/hbase/meta/1588230740/.tmp/ns/127aa5102069488c9bb51efff74b9132 is 43, key is default/ns:d/1732617086219/Put/seqid=0 2024-11-26T10:31:26,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39753 is added to blk_1073741837_1013 (size=5153) 2024-11-26T10:31:26,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36573 is added to blk_1073741837_1013 (size=5153) 2024-11-26T10:31:26,316 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/data/hbase/meta/1588230740/.tmp/ns/127aa5102069488c9bb51efff74b9132 2024-11-26T10:31:26,321 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/data/hbase/meta/1588230740/.tmp/ns/127aa5102069488c9bb51efff74b9132 as hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/data/hbase/meta/1588230740/ns/127aa5102069488c9bb51efff74b9132 2024-11-26T10:31:26,325 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/data/hbase/meta/1588230740/ns/127aa5102069488c9bb51efff74b9132, entries=2, sequenceid=6, filesize=5.0 K 2024-11-26T10:31:26,326 INFO 
[RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=6, compaction requested=false 2024-11-26T10:31:26,326 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-26T10:31:26,330 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-26T10:31:26,330 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-26T10:31:26,330 INFO [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-26T10:31:26,330 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732617086295Running coprocessor pre-close hooks at 1732617086295Disabling compacts and flushes for region at 1732617086295Disabling writes for close at 1732617086295Obtaining lock to block concurrent updates at 1732617086295Preparing flush snapshotting stores in 1588230740 at 1732617086295Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732617086296 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732617086296Flushing 1588230740/ns: creating writer at 1732617086296Flushing 1588230740/ns: appending metadata at 1732617086310 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732617086310Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54c9b02f: reopening flushed file at 1732617086321 (+11 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=6, compaction requested=false at 1732617086326 (+5 ms)Writing region close event to WAL at 1732617086327 (+1 ms)Running coprocessor post-close hooks at 1732617086330 (+3 ms)Closed at 1732617086330 2024-11-26T10:31:26,330 DEBUG [RS_CLOSE_META-regionserver/94eedbb855cf:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-26T10:31:26,495 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.HRegionServer(976): stopping server 94eedbb855cf,41713,1732617085433; all regions closed. 
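The close sequence above flushes the meta region's memstore (74 B in the ns family, sequenceid=6) before the region is closed. A flush can also be requested explicitly through the client Admin API; the sketch below is a hedged illustration with a made-up table name, not part of the test shown in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of requesting a flush via the client API; during region close, as logged above,
// the region server performs the flush itself.
public class ExplicitFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("example_table")); // flush all memstores of the table
    }
  }
}
```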
2024-11-26T10:31:26,496 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,496 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,496 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,496 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,496 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39753 is added to blk_1073741834_1010 (size=1152) 2024-11-26T10:31:26,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36573 is added to blk_1073741834_1010 (size=1152) 2024-11-26T10:31:26,500 DEBUG [RS:0;94eedbb855cf:41713 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/oldWALs 2024-11-26T10:31:26,500 INFO [RS:0;94eedbb855cf:41713 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 94eedbb855cf%2C41713%2C1732617085433.meta:.meta(num 1732617086179) 2024-11-26T10:31:26,500 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,500 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,500 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,501 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,501 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36573 is added to blk_1073741833_1009 (size=93) 2024-11-26T10:31:26,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39753 is added to blk_1073741833_1009 (size=93) 2024-11-26T10:31:26,504 DEBUG [RS:0;94eedbb855cf:41713 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/oldWALs 2024-11-26T10:31:26,504 INFO [RS:0;94eedbb855cf:41713 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 94eedbb855cf%2C41713%2C1732617085433:(num 1732617085810) 2024-11-26T10:31:26,504 DEBUG [RS:0;94eedbb855cf:41713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:31:26,504 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.LeaseManager(133): Closed leases 2024-11-26T10:31:26,504 INFO [RS:0;94eedbb855cf:41713 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-26T10:31:26,505 INFO [RS:0;94eedbb855cf:41713 {}] hbase.ChoreService(370): Chore service for: regionserver/94eedbb855cf:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-26T10:31:26,505 INFO [RS:0;94eedbb855cf:41713 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-26T10:31:26,505 INFO [regionserver/94eedbb855cf:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
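The "Moved 1 WAL file(s) to .../oldWALs" and "Closed WAL: FSHLog ..." lines are the tail of a WAL roll-and-close, which is the operation this TestLogRolling run exercises. On a live cluster the same roll can be requested per region server; a hedged sketch using the public Admin API (quorum/configuration discovery is assumed, and the class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Request a WAL roll on every live region server; rolled-off WAL files are
          // archived to the oldWALs directory once no region still needs their edits.
          for (ServerName rs : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
            admin.rollWALWriter(rs);
          }
        }
      }
    }
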
2024-11-26T10:31:26,505 INFO [RS:0;94eedbb855cf:41713 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41713 2024-11-26T10:31:26,507 INFO [RS:0;94eedbb855cf:41713 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-26T10:31:26,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-26T10:31:26,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/94eedbb855cf,41713,1732617085433 2024-11-26T10:31:26,509 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [94eedbb855cf,41713,1732617085433] 2024-11-26T10:31:26,510 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/94eedbb855cf,41713,1732617085433 already deleted, retry=false 2024-11-26T10:31:26,510 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 94eedbb855cf,41713,1732617085433 expired; onlineServers=0 2024-11-26T10:31:26,510 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '94eedbb855cf,41949,1732617085388' ***** 2024-11-26T10:31:26,510 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-26T10:31:26,510 INFO [M:0;94eedbb855cf:41949 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-26T10:31:26,510 INFO [M:0;94eedbb855cf:41949 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-26T10:31:26,510 DEBUG [M:0;94eedbb855cf:41949 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-26T10:31:26,511 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
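The RegionServerTracker activity above is triggered by the region server's ephemeral znode under /hbase/rs disappearing when its ZooKeeper session ends. A stripped-down sketch of that watch mechanism using the plain ZooKeeper client API; the quorum address, znode path, and timeout are illustrative, not taken from this run:

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RsNodeWatch {
      public static void main(String[] args) throws Exception {
        // Hypothetical quorum and znode, modelled on the /hbase/rs/<server> layout in the log.
        String quorum = "127.0.0.1:2181";
        String rsZnode = "/hbase/rs/example-regionserver";
        CountDownLatch deleted = new CountDownLatch(1);

        ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { /* connection-level events ignored */ });
        // One-shot watch: fires when the ephemeral node changes or goes away.
        zk.exists(rsZnode, (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
            // This is the point where the master's tracker would begin expiration handling.
            deleted.countDown();
          }
        });
        deleted.await();
        zk.close();
      }
    }
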
2024-11-26T10:31:26,511 DEBUG [M:0;94eedbb855cf:41949 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-26T10:31:26,511 DEBUG [master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732617085582 {}] cleaner.HFileCleaner(306): Exit Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.small.0-1732617085582,5,FailOnTimeoutGroup] 2024-11-26T10:31:26,511 DEBUG [master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732617085582 {}] cleaner.HFileCleaner(306): Exit Thread[master/94eedbb855cf:0:becomeActiveMaster-HFileCleaner.large.0-1732617085582,5,FailOnTimeoutGroup] 2024-11-26T10:31:26,511 INFO [M:0;94eedbb855cf:41949 {}] hbase.ChoreService(370): Chore service for: master/94eedbb855cf:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-26T10:31:26,511 INFO [M:0;94eedbb855cf:41949 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-26T10:31:26,511 DEBUG [M:0;94eedbb855cf:41949 {}] master.HMaster(1795): Stopping service threads 2024-11-26T10:31:26,511 INFO [M:0;94eedbb855cf:41949 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-26T10:31:26,511 INFO [M:0;94eedbb855cf:41949 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-26T10:31:26,511 INFO [M:0;94eedbb855cf:41949 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-26T10:31:26,511 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-26T10:31:26,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-26T10:31:26,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:31:26,512 DEBUG [M:0;94eedbb855cf:41949 {}] zookeeper.ZKUtil(347): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-26T10:31:26,512 WARN [M:0;94eedbb855cf:41949 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-26T10:31:26,513 INFO [M:0;94eedbb855cf:41949 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/.lastflushedseqids 2024-11-26T10:31:26,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39753 is added to blk_1073741838_1014 (size=99) 2024-11-26T10:31:26,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36573 is added to blk_1073741838_1014 (size=99) 2024-11-26T10:31:26,518 INFO [M:0;94eedbb855cf:41949 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-26T10:31:26,518 INFO [M:0;94eedbb855cf:41949 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-26T10:31:26,518 DEBUG [M:0;94eedbb855cf:41949 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-26T10:31:26,518 INFO [M:0;94eedbb855cf:41949 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:31:26,518 DEBUG [M:0;94eedbb855cf:41949 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:31:26,518 DEBUG [M:0;94eedbb855cf:41949 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-26T10:31:26,518 DEBUG [M:0;94eedbb855cf:41949 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:31:26,518 INFO [M:0;94eedbb855cf:41949 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-26T10:31:26,533 DEBUG [M:0;94eedbb855cf:41949 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/42beb83ef1574c8d89dc62f35f39208d is 82, key is hbase:meta,,1/info:regioninfo/1732617086202/Put/seqid=0 2024-11-26T10:31:26,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36573 is added to blk_1073741839_1015 (size=5672) 2024-11-26T10:31:26,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39753 is added to blk_1073741839_1015 (size=5672) 2024-11-26T10:31:26,538 INFO [M:0;94eedbb855cf:41949 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/42beb83ef1574c8d89dc62f35f39208d 2024-11-26T10:31:26,556 DEBUG [M:0;94eedbb855cf:41949 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a531af833cc14911ac7be4f8a05b07e7 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732617086223/Put/seqid=0 2024-11-26T10:31:26,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39753 is added to blk_1073741840_1016 (size=5275) 2024-11-26T10:31:26,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36573 is added to blk_1073741840_1016 (size=5275) 2024-11-26T10:31:26,561 INFO [M:0;94eedbb855cf:41949 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a531af833cc14911ac7be4f8a05b07e7 2024-11-26T10:31:26,578 DEBUG [M:0;94eedbb855cf:41949 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7d419426e69e48109c434846c068f6da is 69, key is 94eedbb855cf,41713,1732617085433/rs:state/1732617085668/Put/seqid=0 2024-11-26T10:31:26,582 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36573 is added to blk_1073741841_1017 (size=5156) 2024-11-26T10:31:26,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39753 is added to blk_1073741841_1017 (size=5156) 2024-11-26T10:31:26,582 INFO [M:0;94eedbb855cf:41949 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7d419426e69e48109c434846c068f6da 2024-11-26T10:31:26,599 DEBUG [M:0;94eedbb855cf:41949 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8655579bbe8042088098a0b4ce8667d0 is 52, key is load_balancer_on/state:d/1732617086258/Put/seqid=0 2024-11-26T10:31:26,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39753 is added to blk_1073741842_1018 (size=5056) 2024-11-26T10:31:26,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36573 is added to blk_1073741842_1018 (size=5056) 2024-11-26T10:31:26,604 INFO [M:0;94eedbb855cf:41949 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8655579bbe8042088098a0b4ce8667d0 2024-11-26T10:31:26,608 DEBUG [M:0;94eedbb855cf:41949 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/42beb83ef1574c8d89dc62f35f39208d as hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/42beb83ef1574c8d89dc62f35f39208d 2024-11-26T10:31:26,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:31:26,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x10153d48c4f0001, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:31:26,609 INFO [RS:0;94eedbb855cf:41713 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-26T10:31:26,609 INFO [RS:0;94eedbb855cf:41713 {}] regionserver.HRegionServer(1031): Exiting; stopping=94eedbb855cf,41713,1732617085433; zookeeper connection closed. 
2024-11-26T10:31:26,610 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3505e352 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3505e352 2024-11-26T10:31:26,610 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-26T10:31:26,612 INFO [M:0;94eedbb855cf:41949 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/42beb83ef1574c8d89dc62f35f39208d, entries=8, sequenceid=29, filesize=5.5 K 2024-11-26T10:31:26,613 DEBUG [M:0;94eedbb855cf:41949 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a531af833cc14911ac7be4f8a05b07e7 as hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a531af833cc14911ac7be4f8a05b07e7 2024-11-26T10:31:26,617 INFO [M:0;94eedbb855cf:41949 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a531af833cc14911ac7be4f8a05b07e7, entries=3, sequenceid=29, filesize=5.2 K 2024-11-26T10:31:26,617 DEBUG [M:0;94eedbb855cf:41949 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7d419426e69e48109c434846c068f6da as hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7d419426e69e48109c434846c068f6da 2024-11-26T10:31:26,621 INFO [M:0;94eedbb855cf:41949 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7d419426e69e48109c434846c068f6da, entries=1, sequenceid=29, filesize=5.0 K 2024-11-26T10:31:26,622 DEBUG [M:0;94eedbb855cf:41949 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8655579bbe8042088098a0b4ce8667d0 as hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8655579bbe8042088098a0b4ce8667d0 2024-11-26T10:31:26,626 INFO [M:0;94eedbb855cf:41949 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32955/user/jenkins/test-data/5d2a5625-d21f-acdd-fc1b-91e017d7b6ad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8655579bbe8042088098a0b4ce8667d0, entries=1, sequenceid=29, filesize=4.9 K 2024-11-26T10:31:26,627 INFO [M:0;94eedbb855cf:41949 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 109ms, sequenceid=29, compaction requested=false 2024-11-26T10:31:26,628 INFO [M:0;94eedbb855cf:41949 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
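The repeated "Committing .../.tmp/... as ..." lines above show the usual write-then-rename pattern: each flushed HFile is written under a temporary directory and only a rename makes it visible in the store. A small sketch of that pattern with the plain Hadoop FileSystem API (paths and file names are invented for illustration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenRename {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // fs.defaultFS decides which filesystem this talks to
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/example/store/.tmp/part-0000");   // written where readers do not look
        Path committed = new Path("/example/store/part-0000");  // becomes visible on rename

        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.writeUTF("flushed data");
        }
        // The rename is the commit point: either the whole file appears under the store or nothing does.
        if (!fs.rename(tmp, committed)) {
          throw new java.io.IOException("commit failed for " + committed);
        }
      }
    }
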
2024-11-26T10:31:26,628 DEBUG [M:0;94eedbb855cf:41949 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732617086518Disabling compacts and flushes for region at 1732617086518Disabling writes for close at 1732617086518Obtaining lock to block concurrent updates at 1732617086518Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732617086518Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732617086519 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732617086519Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732617086519Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732617086533 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732617086533Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732617086542 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732617086556 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732617086556Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732617086564 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732617086578 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732617086578Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732617086586 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732617086599 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732617086599Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c8cdbce: reopening flushed file at 1732617086607 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ad799c8: reopening flushed file at 1732617086612 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@672b773f: reopening flushed file at 1732617086617 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b82bd1: reopening flushed file at 1732617086621 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 109ms, sequenceid=29, compaction requested=false at 1732617086627 (+6 ms)Writing region close event to WAL at 1732617086628 (+1 ms)Closed at 1732617086628 2024-11-26T10:31:26,629 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,629 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,629 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,629 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,629 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-26T10:31:26,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39753 is added to blk_1073741830_1006 (size=10311) 2024-11-26T10:31:26,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36573 is added to blk_1073741830_1006 (size=10311) 2024-11-26T10:31:26,631 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-26T10:31:26,631 INFO [M:0;94eedbb855cf:41949 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-26T10:31:26,632 INFO [M:0;94eedbb855cf:41949 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41949 2024-11-26T10:31:26,632 INFO [M:0;94eedbb855cf:41949 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-26T10:31:26,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:31:26,734 INFO [M:0;94eedbb855cf:41949 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-26T10:31:26,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41949-0x10153d48c4f0000, quorum=127.0.0.1:59978, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:31:26,736 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ec9da7e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:31:26,737 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7acdff1a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:31:26,737 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:31:26,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@663b7fb1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:31:26,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a8a2fb4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/hadoop.log.dir/,STOPPED} 2024-11-26T10:31:26,738 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-26T10:31:26,738 WARN [BP-1732810580-172.17.0.2-1732617084652 heartbeating to localhost/127.0.0.1:32955 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:31:26,738 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:31:26,738 WARN [BP-1732810580-172.17.0.2-1732617084652 heartbeating to localhost/127.0.0.1:32955 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1732810580-172.17.0.2-1732617084652 (Datanode Uuid 97537d47-5a8b-48ac-adeb-882f12072389) service to localhost/127.0.0.1:32955 2024-11-26T10:31:26,739 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/cluster_4d7273ce-eed7-8952-0866-bbd49f72f0b2/data/data3/current/BP-1732810580-172.17.0.2-1732617084652 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:31:26,739 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/cluster_4d7273ce-eed7-8952-0866-bbd49f72f0b2/data/data4/current/BP-1732810580-172.17.0.2-1732617084652 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:31:26,739 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:31:26,741 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d664f93{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:31:26,741 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@96dff09{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:31:26,741 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:31:26,741 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2482618b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:31:26,742 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69a2ae1b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/hadoop.log.dir/,STOPPED} 2024-11-26T10:31:26,743 WARN [BP-1732810580-172.17.0.2-1732617084652 heartbeating to localhost/127.0.0.1:32955 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:31:26,743 WARN [BP-1732810580-172.17.0.2-1732617084652 heartbeating to localhost/127.0.0.1:32955 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1732810580-172.17.0.2-1732617084652 (Datanode Uuid 9eb222db-2d4c-4144-85f6-dff970774cd5) service to localhost/127.0.0.1:32955 2024-11-26T10:31:26,743 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
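The datanode block-pool services and their Jetty endpoints stopping above are the HDFS half of the mini cluster tear-down that completes just below ("Minicluster is down"). In test code this whole lifecycle is typically driven by one utility pair; the sketch below uses the long-standing HBaseTestingUtility API (HBaseTestingUtil in this log is its branch-3 successor, so exact names and signatures may differ), and the test class itself is hypothetical:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class MiniClusterLifecycleExample {
      private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

      @BeforeClass
      public static void setUp() throws Exception {
        UTIL.startMiniCluster();    // embedded ZooKeeper + HDFS + one master + one region server
      }

      @AfterClass
      public static void tearDown() throws Exception {
        UTIL.shutdownMiniCluster(); // produces shutdown logging of the kind seen above
      }

      @Test
      public void clusterIsUp() throws Exception {
        // Trivial check that the cluster answers admin calls.
        UTIL.getAdmin().listTableNames();
      }
    }
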
2024-11-26T10:31:26,743 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/cluster_4d7273ce-eed7-8952-0866-bbd49f72f0b2/data/data2/current/BP-1732810580-172.17.0.2-1732617084652 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:31:26,743 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:31:26,743 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:31:26,744 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/cluster_4d7273ce-eed7-8952-0866-bbd49f72f0b2/data/data1/current/BP-1732810580-172.17.0.2-1732617084652 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:31:26,749 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@60c41d21{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-26T10:31:26,749 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3637fc70{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:31:26,749 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:31:26,749 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61290c18{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:31:26,749 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73544558{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6dbb6ce7-05b2-3504-cc17-9aaebe431c13/hadoop.log.dir/,STOPPED} 2024-11-26T10:31:26,755 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-26T10:31:26,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-26T10:31:26,780 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=269 (was 228) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:32955 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:32955 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:32955 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:32955 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:32955 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32955 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32955 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32955 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=536 (was 509) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=51 (was 51), ProcessCount=11 (was 11), AvailableMemoryMB=6028 (was 6034)
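
The closing ResourceChecker report compares thread counts, open file descriptors, load average, and free memory before and after the test to flag possible leaks. The bookkeeping idea can be approximated with plain JDK calls; this is only an illustration of the comparison, not the ResourceChecker implementation, and the class name is invented:

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadMXBean;
    import java.util.HashSet;
    import java.util.Set;

    public class ThreadLeakSnapshot {
      public static void main(String[] args) {
        ThreadMXBean threads = ManagementFactory.getThreadMXBean();
        int before = threads.getThreadCount();
        Set<Thread> baseline = new HashSet<>(Thread.getAllStackTraces().keySet());

        // ... run the code under test here ...

        int after = threads.getThreadCount();
        System.out.printf("Thread=%d (was %d)%n", after, before);
        // Report threads that exist now but were not in the baseline, similar to the
        // "Potentially hanging thread" entries above.
        for (Thread t : Thread.getAllStackTraces().keySet()) {
          if (!baseline.contains(t)) {
            System.out.println("Potentially hanging thread: " + t.getName());
            for (StackTraceElement frame : t.getStackTrace()) {
              System.out.println("    " + frame);
            }
          }
        }
      }
    }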