2024-11-24 16:50:09,710 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-24 16:50:09,724 main DEBUG Took 0.011373 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-24 16:50:09,724 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-24 16:50:09,724 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-24 16:50:09,725 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-24 16:50:09,726 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 16:50:09,733 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-24 16:50:09,745 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 16:50:09,746 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 16:50:09,747 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 16:50:09,747 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 16:50:09,748 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 16:50:09,748 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 16:50:09,749 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 16:50:09,749 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 16:50:09,750 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 16:50:09,750 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 16:50:09,751 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 16:50:09,751 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 16:50:09,751 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 16:50:09,752 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-24 16:50:09,752 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 16:50:09,752 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 16:50:09,753 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 16:50:09,753 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 16:50:09,753 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 16:50:09,754 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 16:50:09,754 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 16:50:09,754 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 16:50:09,755 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 16:50:09,755 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 16:50:09,755 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 16:50:09,756 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-24 16:50:09,757 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 16:50:09,758 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-24 16:50:09,760 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-24 16:50:09,760 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-24 16:50:09,761 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-24 16:50:09,761 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-24 16:50:09,769 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-24 16:50:09,772 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-24 16:50:09,773 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-24 16:50:09,774 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-24 16:50:09,774 main DEBUG createAppenders(={Console}) 2024-11-24 16:50:09,775 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-24 16:50:09,775 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-24 16:50:09,775 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-24 16:50:09,776 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-24 16:50:09,776 main DEBUG OutputStream closed 2024-11-24 16:50:09,776 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-24 16:50:09,776 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-24 16:50:09,777 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-24 16:50:09,848 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-24 16:50:09,850 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-24 16:50:09,851 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-24 16:50:09,852 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-24 16:50:09,852 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-24 16:50:09,853 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-24 16:50:09,853 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-24 16:50:09,853 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-24 16:50:09,853 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-24 16:50:09,854 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-24 16:50:09,854 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-24 16:50:09,854 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-24 16:50:09,854 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-24 16:50:09,855 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-24 16:50:09,855 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-24 16:50:09,855 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-24 16:50:09,856 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-24 16:50:09,856 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-24 16:50:09,859 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-24 16:50:09,859 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-24 16:50:09,859 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-24 16:50:09,860 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-24T16:50:10,085 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9 2024-11-24 16:50:10,088 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-24 16:50:10,089 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-24T16:50:10,098 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-24T16:50:10,133 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=201, ProcessCount=11, AvailableMemoryMB=6834 2024-11-24T16:50:10,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T16:50:10,157 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/cluster_5b8f1de5-3aca-f4bc-4834-dbdf2fcf5465, deleteOnExit=true 2024-11-24T16:50:10,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T16:50:10,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/test.cache.data in system properties and HBase conf 2024-11-24T16:50:10,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T16:50:10,160 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/hadoop.log.dir in system properties and HBase conf 2024-11-24T16:50:10,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T16:50:10,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T16:50:10,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T16:50:10,276 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-24T16:50:10,389 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T16:50:10,392 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T16:50:10,393 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T16:50:10,393 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T16:50:10,394 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T16:50:10,394 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T16:50:10,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T16:50:10,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T16:50:10,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T16:50:10,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T16:50:10,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/nfs.dump.dir in system properties and HBase conf 2024-11-24T16:50:10,397 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/java.io.tmpdir in system properties and HBase conf 2024-11-24T16:50:10,397 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T16:50:10,398 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T16:50:10,398 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T16:50:10,902 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T16:50:11,264 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-24T16:50:11,348 INFO [Time-limited test {}] log.Log(170): Logging initialized @2309ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-24T16:50:11,433 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:50:11,495 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:50:11,515 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:50:11,515 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:50:11,516 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T16:50:11,529 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:50:11,531 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:50:11,532 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:50:11,733 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/java.io.tmpdir/jetty-localhost-42383-hadoop-hdfs-3_4_1-tests_jar-_-any-1089835506400174901/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T16:50:11,741 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:42383} 2024-11-24T16:50:11,741 INFO [Time-limited test {}] server.Server(415): Started @2703ms 2024-11-24T16:50:11,766 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T16:50:12,137 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:50:12,143 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:50:12,145 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:50:12,145 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:50:12,145 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:50:12,146 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:50:12,147 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:50:12,277 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/java.io.tmpdir/jetty-localhost-40547-hadoop-hdfs-3_4_1-tests_jar-_-any-3984785078106133727/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:50:12,279 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:40547} 2024-11-24T16:50:12,279 INFO [Time-limited test {}] server.Server(415): Started @3241ms 2024-11-24T16:50:12,343 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T16:50:12,466 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:50:12,472 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:50:12,473 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:50:12,473 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:50:12,473 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:50:12,474 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:50:12,474 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:50:12,595 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/java.io.tmpdir/jetty-localhost-36847-hadoop-hdfs-3_4_1-tests_jar-_-any-7119741095583730958/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:50:12,596 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:36847} 2024-11-24T16:50:12,596 INFO [Time-limited test {}] server.Server(415): Started @3558ms 2024-11-24T16:50:12,599 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
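[Editorial note, not part of the captured log] The minicluster being brought up above is described by the StartMiniClusterOption printed earlier (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1). The sketch below shows how a JUnit test typically drives that setup. It is a minimal illustration only: the class names HBaseTestingUtil and StartMiniClusterOption and the option fields are taken from the log itself, while the builder methods, startMiniCluster/shutdownMiniCluster calls, and the MiniClusterSketch test class are assumptions based on the HBase test utility API, not the actual TestLogRolling source.

```java
// Hedged sketch (assumed HBase test-utility API, hypothetical test class):
// start and stop a mini cluster matching the StartMiniClusterOption shown in the log.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.After;
import org.junit.Before;

public class MiniClusterSketch {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Builder method names are inferred from the option fields printed in the log.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // one HMaster, as in the log
        .numRegionServers(1)  // one RegionServer
        .numDataNodes(2)      // two HDFS DataNodes
        .numZkServers(1)      // one ZooKeeper server
        .build();
    // Brings up mini DFS, mini ZooKeeper, the master and the region server,
    // which is what the surrounding log lines record.
    testUtil.startMiniCluster(option);
  }

  @After
  public void tearDown() throws Exception {
    // Shuts the whole mini cluster down and cleans up the test data directories.
    testUtil.shutdownMiniCluster();
  }
}
```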
2024-11-24T16:50:12,830 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/cluster_5b8f1de5-3aca-f4bc-4834-dbdf2fcf5465/data/data2/current/BP-1592658052-172.17.0.2-1732467011005/current, will proceed with Du for space computation calculation, 2024-11-24T16:50:12,830 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/cluster_5b8f1de5-3aca-f4bc-4834-dbdf2fcf5465/data/data1/current/BP-1592658052-172.17.0.2-1732467011005/current, will proceed with Du for space computation calculation, 2024-11-24T16:50:12,830 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/cluster_5b8f1de5-3aca-f4bc-4834-dbdf2fcf5465/data/data4/current/BP-1592658052-172.17.0.2-1732467011005/current, will proceed with Du for space computation calculation, 2024-11-24T16:50:12,830 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/cluster_5b8f1de5-3aca-f4bc-4834-dbdf2fcf5465/data/data3/current/BP-1592658052-172.17.0.2-1732467011005/current, will proceed with Du for space computation calculation, 2024-11-24T16:50:12,883 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T16:50:12,885 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T16:50:12,961 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xab50516c426768bd with lease ID 0x8db6771d295a2b75: Processing first storage report for DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5 from datanode DatanodeRegistration(127.0.0.1:34837, datanodeUuid=c2a2ece3-fc78-4c9d-b94a-8a86518f6f03, infoPort=40043, infoSecurePort=0, ipcPort=45753, storageInfo=lv=-57;cid=testClusterID;nsid=1210041377;c=1732467011005) 2024-11-24T16:50:12,962 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xab50516c426768bd with lease ID 0x8db6771d295a2b75: from storage DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5 node DatanodeRegistration(127.0.0.1:34837, datanodeUuid=c2a2ece3-fc78-4c9d-b94a-8a86518f6f03, infoPort=40043, infoSecurePort=0, ipcPort=45753, storageInfo=lv=-57;cid=testClusterID;nsid=1210041377;c=1732467011005), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-24T16:50:12,963 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x24d085311ad565e4 with lease ID 0x8db6771d295a2b74: Processing first storage report for DS-228fca95-a711-4772-b7d9-fa822ba4d913 from datanode DatanodeRegistration(127.0.0.1:33785, datanodeUuid=46021085-9e33-4037-9df8-f8852bffa368, infoPort=36531, infoSecurePort=0, ipcPort=38459, storageInfo=lv=-57;cid=testClusterID;nsid=1210041377;c=1732467011005) 2024-11-24T16:50:12,963 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x24d085311ad565e4 with lease ID 0x8db6771d295a2b74: from storage DS-228fca95-a711-4772-b7d9-fa822ba4d913 node DatanodeRegistration(127.0.0.1:33785, datanodeUuid=46021085-9e33-4037-9df8-f8852bffa368, infoPort=36531, infoSecurePort=0, ipcPort=38459, storageInfo=lv=-57;cid=testClusterID;nsid=1210041377;c=1732467011005), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:50:12,963 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xab50516c426768bd with lease ID 0x8db6771d295a2b75: Processing first storage report for DS-37db4cf0-313b-4f5d-9249-d63c7bf3089c from datanode DatanodeRegistration(127.0.0.1:34837, datanodeUuid=c2a2ece3-fc78-4c9d-b94a-8a86518f6f03, infoPort=40043, infoSecurePort=0, ipcPort=45753, storageInfo=lv=-57;cid=testClusterID;nsid=1210041377;c=1732467011005) 2024-11-24T16:50:12,964 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xab50516c426768bd with lease ID 0x8db6771d295a2b75: from storage DS-37db4cf0-313b-4f5d-9249-d63c7bf3089c node DatanodeRegistration(127.0.0.1:34837, datanodeUuid=c2a2ece3-fc78-4c9d-b94a-8a86518f6f03, infoPort=40043, infoSecurePort=0, ipcPort=45753, storageInfo=lv=-57;cid=testClusterID;nsid=1210041377;c=1732467011005), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:50:12,964 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x24d085311ad565e4 with lease ID 0x8db6771d295a2b74: Processing first storage report for DS-ae6ec016-5fba-4b72-88c7-76276cc4c3f3 from datanode DatanodeRegistration(127.0.0.1:33785, datanodeUuid=46021085-9e33-4037-9df8-f8852bffa368, infoPort=36531, infoSecurePort=0, ipcPort=38459, storageInfo=lv=-57;cid=testClusterID;nsid=1210041377;c=1732467011005) 2024-11-24T16:50:12,964 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x24d085311ad565e4 with lease ID 0x8db6771d295a2b74: from storage DS-ae6ec016-5fba-4b72-88c7-76276cc4c3f3 node DatanodeRegistration(127.0.0.1:33785, datanodeUuid=46021085-9e33-4037-9df8-f8852bffa368, infoPort=36531, infoSecurePort=0, ipcPort=38459, storageInfo=lv=-57;cid=testClusterID;nsid=1210041377;c=1732467011005), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:50:12,990 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9 2024-11-24T16:50:13,070 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/cluster_5b8f1de5-3aca-f4bc-4834-dbdf2fcf5465/zookeeper_0, clientPort=53097, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/cluster_5b8f1de5-3aca-f4bc-4834-dbdf2fcf5465/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/cluster_5b8f1de5-3aca-f4bc-4834-dbdf2fcf5465/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T16:50:13,080 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53097 2024-11-24T16:50:13,093 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:50:13,096 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:50:13,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741825_1001 (size=7) 2024-11-24T16:50:13,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741825_1001 (size=7) 2024-11-24T16:50:13,753 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b with version=8 2024-11-24T16:50:13,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/hbase-staging 2024-11-24T16:50:13,844 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-24T16:50:14,092 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e335232f7491:0 server-side Connection retries=45 2024-11-24T16:50:14,103 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:50:14,103 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T16:50:14,108 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T16:50:14,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:50:14,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T16:50:14,244 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T16:50:14,306 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-24T16:50:14,317 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-24T16:50:14,322 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T16:50:14,350 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 2906 (auto-detected) 2024-11-24T16:50:14,351 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-24T16:50:14,370 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36731 2024-11-24T16:50:14,392 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36731 connecting to ZooKeeper ensemble=127.0.0.1:53097 2024-11-24T16:50:14,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:367310x0, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T16:50:14,431 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36731-0x1014ae2a04b0000 connected 2024-11-24T16:50:14,456 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:50:14,458 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:50:14,468 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:50:14,472 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b, hbase.cluster.distributed=false 2024-11-24T16:50:14,500 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T16:50:14,504 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36731 
2024-11-24T16:50:14,505 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36731 2024-11-24T16:50:14,505 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36731 2024-11-24T16:50:14,506 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36731 2024-11-24T16:50:14,506 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36731 2024-11-24T16:50:14,617 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e335232f7491:0 server-side Connection retries=45 2024-11-24T16:50:14,619 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:50:14,619 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T16:50:14,619 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T16:50:14,619 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:50:14,620 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T16:50:14,623 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T16:50:14,625 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T16:50:14,626 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39595 2024-11-24T16:50:14,628 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39595 connecting to ZooKeeper ensemble=127.0.0.1:53097 2024-11-24T16:50:14,629 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:50:14,632 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:50:14,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:395950x0, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T16:50:14,641 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39595-0x1014ae2a04b0001 connected 2024-11-24T16:50:14,641 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-11-24T16:50:14,645 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T16:50:14,653 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T16:50:14,655 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T16:50:14,660 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T16:50:14,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39595 2024-11-24T16:50:14,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39595 2024-11-24T16:50:14,662 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39595 2024-11-24T16:50:14,662 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39595 2024-11-24T16:50:14,664 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39595 2024-11-24T16:50:14,680 DEBUG [M:0;e335232f7491:36731 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e335232f7491:36731 2024-11-24T16:50:14,681 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e335232f7491,36731,1732467013895 2024-11-24T16:50:14,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:50:14,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:50:14,689 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e335232f7491,36731,1732467013895 2024-11-24T16:50:14,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T16:50:14,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:50:14,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:50:14,711 DEBUG 
[master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T16:50:14,712 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e335232f7491,36731,1732467013895 from backup master directory 2024-11-24T16:50:14,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e335232f7491,36731,1732467013895 2024-11-24T16:50:14,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:50:14,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:50:14,715 WARN [master/e335232f7491:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T16:50:14,715 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e335232f7491,36731,1732467013895 2024-11-24T16:50:14,717 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-24T16:50:14,719 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-24T16:50:14,773 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/hbase.id] with ID: 08c9fde8-6691-47cc-bb65-13aabba7967b 2024-11-24T16:50:14,773 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/.tmp/hbase.id 2024-11-24T16:50:14,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741826_1002 (size=42) 2024-11-24T16:50:14,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741826_1002 (size=42) 2024-11-24T16:50:14,787 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/.tmp/hbase.id]:[hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/hbase.id] 2024-11-24T16:50:14,829 INFO [master/e335232f7491:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:50:14,833 INFO [master/e335232f7491:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-24T16:50:14,852 INFO [master/e335232f7491:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-24T16:50:14,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:50:14,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:50:14,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741827_1003 (size=196) 2024-11-24T16:50:14,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741827_1003 (size=196) 2024-11-24T16:50:14,892 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T16:50:14,894 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T16:50:14,900 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:50:14,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741828_1004 (size=1189) 2024-11-24T16:50:14,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741828_1004 (size=1189) 2024-11-24T16:50:14,948 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store 2024-11-24T16:50:14,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741829_1005 (size=34) 2024-11-24T16:50:14,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741829_1005 (size=34) 2024-11-24T16:50:14,973 INFO [master/e335232f7491:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-24T16:50:14,976 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:50:14,977 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T16:50:14,977 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:50:14,978 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:50:14,979 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T16:50:14,979 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:50:14,979 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T16:50:14,980 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732467014977Disabling compacts and flushes for region at 1732467014977Disabling writes for close at 1732467014979 (+2 ms)Writing region close event to WAL at 1732467014979Closed at 1732467014979 2024-11-24T16:50:14,982 WARN [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/.initializing 2024-11-24T16:50:14,983 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/WALs/e335232f7491,36731,1732467013895 2024-11-24T16:50:15,005 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C36731%2C1732467013895, suffix=, logDir=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/WALs/e335232f7491,36731,1732467013895, archiveDir=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/oldWALs, maxLogs=10 2024-11-24T16:50:15,013 INFO [master/e335232f7491:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C36731%2C1732467013895.1732467015009 2024-11-24T16:50:15,034 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/WALs/e335232f7491,36731,1732467013895/e335232f7491%2C36731%2C1732467013895.1732467015009 2024-11-24T16:50:15,045 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40043:40043),(127.0.0.1/127.0.0.1:36531:36531)] 2024-11-24T16:50:15,049 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:50:15,049 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:50:15,053 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:50:15,055 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:50:15,094 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:50:15,120 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T16:50:15,124 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:50:15,127 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:50:15,127 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:50:15,130 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T16:50:15,130 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:50:15,131 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:50:15,132 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:50:15,134 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T16:50:15,134 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:50:15,135 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:50:15,136 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:50:15,138 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T16:50:15,138 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:50:15,139 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:50:15,139 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:50:15,143 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:50:15,144 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:50:15,150 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:50:15,150 DEBUG [master/e335232f7491:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:50:15,153 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T16:50:15,158 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:50:15,162 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:50:15,164 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=857236, jitterRate=0.09003236889839172}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T16:50:15,171 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732467015070Initializing all the Stores at 1732467015072 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467015072Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467015073 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467015073Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467015073Cleaning up temporary data from old regions at 1732467015150 (+77 ms)Region opened successfully at 1732467015171 (+21 ms) 2024-11-24T16:50:15,172 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T16:50:15,206 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@616af3ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e335232f7491/172.17.0.2:0 2024-11-24T16:50:15,238 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T16:50:15,251 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T16:50:15,251 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T16:50:15,255 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T16:50:15,256 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-24T16:50:15,262 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-24T16:50:15,262 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T16:50:15,291 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T16:50:15,300 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T16:50:15,302 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T16:50:15,305 INFO [master/e335232f7491:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T16:50:15,306 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T16:50:15,308 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T16:50:15,310 INFO [master/e335232f7491:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T16:50:15,313 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T16:50:15,314 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T16:50:15,316 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T16:50:15,318 DEBUG 
[master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T16:50:15,335 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T16:50:15,336 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T16:50:15,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T16:50:15,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T16:50:15,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:50:15,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:50:15,342 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e335232f7491,36731,1732467013895, sessionid=0x1014ae2a04b0000, setting cluster-up flag (Was=false) 2024-11-24T16:50:15,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:50:15,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:50:15,361 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T16:50:15,363 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e335232f7491,36731,1732467013895 2024-11-24T16:50:15,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:50:15,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:50:15,375 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T16:50:15,377 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e335232f7491,36731,1732467013895 2024-11-24T16:50:15,383 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T16:50:15,455 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T16:50:15,465 INFO [master/e335232f7491:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T16:50:15,468 INFO [RS:0;e335232f7491:39595 {}] regionserver.HRegionServer(746): ClusterId : 08c9fde8-6691-47cc-bb65-13aabba7967b 2024-11-24T16:50:15,471 DEBUG [RS:0;e335232f7491:39595 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T16:50:15,473 INFO [master/e335232f7491:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T16:50:15,476 DEBUG [RS:0;e335232f7491:39595 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T16:50:15,476 DEBUG [RS:0;e335232f7491:39595 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T16:50:15,479 DEBUG [RS:0;e335232f7491:39595 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T16:50:15,479 DEBUG [RS:0;e335232f7491:39595 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1283c177, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e335232f7491/172.17.0.2:0 2024-11-24T16:50:15,480 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e335232f7491,36731,1732467013895 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T16:50:15,489 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:50:15,489 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:50:15,489 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e335232f7491:0, corePoolSize=5, 
maxPoolSize=5 2024-11-24T16:50:15,490 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:50:15,490 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e335232f7491:0, corePoolSize=10, maxPoolSize=10 2024-11-24T16:50:15,490 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:50:15,490 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e335232f7491:0, corePoolSize=2, maxPoolSize=2 2024-11-24T16:50:15,491 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:50:15,494 DEBUG [RS:0;e335232f7491:39595 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e335232f7491:39595 2024-11-24T16:50:15,497 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732467045496 2024-11-24T16:50:15,497 INFO [RS:0;e335232f7491:39595 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T16:50:15,497 INFO [RS:0;e335232f7491:39595 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T16:50:15,497 DEBUG [RS:0;e335232f7491:39595 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T16:50:15,498 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T16:50:15,499 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T16:50:15,500 INFO [RS:0;e335232f7491:39595 {}] regionserver.HRegionServer(2659): reportForDuty to master=e335232f7491,36731,1732467013895 with port=39595, startcode=1732467014577 2024-11-24T16:50:15,501 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:50:15,501 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T16:50:15,503 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T16:50:15,504 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T16:50:15,504 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T16:50:15,504 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T16:50:15,505 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T16:50:15,509 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T16:50:15,509 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:50:15,509 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T16:50:15,510 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T16:50:15,511 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T16:50:15,511 DEBUG [RS:0;e335232f7491:39595 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T16:50:15,513 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T16:50:15,513 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T16:50:15,515 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467015514,5,FailOnTimeoutGroup] 2024-11-24T16:50:15,516 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467015515,5,FailOnTimeoutGroup] 2024-11-24T16:50:15,516 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T16:50:15,516 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T16:50:15,518 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T16:50:15,518 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
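The HMaster(1741) line above states the enabling condition directly: reopening regions with a very high storeFileRefCount stays off until hbase.regions.recovery.store.file.ref.count is given a threshold greater than 0. A hedged sketch, with the threshold chosen purely for illustration:

    // Sketch, not taken from this test: turn on reopening of regions whose store
    // files are pinned by a very high reference count, per the log message above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountRecovery {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 256); // 256 is illustrative
            // Default of 0 keeps the feature disabled, which matches the log above.
            System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
        }
    }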
2024-11-24T16:50:15,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741831_1007 (size=1321) 2024-11-24T16:50:15,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741831_1007 (size=1321) 2024-11-24T16:50:15,526 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T16:50:15,526 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b 2024-11-24T16:50:15,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741832_1008 (size=32) 2024-11-24T16:50:15,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741832_1008 (size=32) 2024-11-24T16:50:15,541 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:50:15,544 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T16:50:15,548 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T16:50:15,548 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:50:15,549 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:50:15,549 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T16:50:15,551 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T16:50:15,552 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:50:15,552 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:50:15,553 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T16:50:15,555 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T16:50:15,555 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:50:15,556 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:50:15,556 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T16:50:15,560 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T16:50:15,560 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:50:15,561 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:50:15,561 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T16:50:15,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740 2024-11-24T16:50:15,564 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740 2024-11-24T16:50:15,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T16:50:15,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T16:50:15,567 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-24T16:50:15,570 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T16:50:15,573 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:50:15,574 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=827415, jitterRate=0.05211383104324341}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T16:50:15,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732467015541Initializing all the Stores at 1732467015543 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467015543Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467015544 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467015544Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467015544Cleaning up temporary data from old regions at 1732467015566 (+22 ms)Region opened successfully at 1732467015576 (+10 ms) 2024-11-24T16:50:15,577 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T16:50:15,577 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T16:50:15,577 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T16:50:15,577 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T16:50:15,577 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T16:50:15,578 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T16:50:15,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732467015577Disabling compacts and flushes for region at 1732467015577Disabling writes for close at 1732467015577Writing 
region close event to WAL at 1732467015578 (+1 ms)Closed at 1732467015578 2024-11-24T16:50:15,581 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:50:15,581 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T16:50:15,587 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T16:50:15,594 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55065, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T16:50:15,597 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T16:50:15,600 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36731 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e335232f7491,39595,1732467014577 2024-11-24T16:50:15,600 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T16:50:15,603 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36731 {}] master.ServerManager(517): Registering regionserver=e335232f7491,39595,1732467014577 2024-11-24T16:50:15,616 DEBUG [RS:0;e335232f7491:39595 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b 2024-11-24T16:50:15,616 DEBUG [RS:0;e335232f7491:39595 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36099 2024-11-24T16:50:15,616 DEBUG [RS:0;e335232f7491:39595 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T16:50:15,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T16:50:15,621 DEBUG [RS:0;e335232f7491:39595 {}] zookeeper.ZKUtil(111): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e335232f7491,39595,1732467014577 2024-11-24T16:50:15,621 WARN [RS:0;e335232f7491:39595 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T16:50:15,621 INFO [RS:0;e335232f7491:39595 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:50:15,622 DEBUG [RS:0;e335232f7491:39595 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577 2024-11-24T16:50:15,623 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e335232f7491,39595,1732467014577] 2024-11-24T16:50:15,648 INFO [RS:0;e335232f7491:39595 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T16:50:15,662 INFO [RS:0;e335232f7491:39595 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T16:50:15,667 INFO [RS:0;e335232f7491:39595 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T16:50:15,667 INFO [RS:0;e335232f7491:39595 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:50:15,668 INFO [RS:0;e335232f7491:39595 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T16:50:15,675 INFO [RS:0;e335232f7491:39595 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T16:50:15,676 INFO [RS:0;e335232f7491:39595 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T16:50:15,676 DEBUG [RS:0;e335232f7491:39595 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:50:15,676 DEBUG [RS:0;e335232f7491:39595 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:50:15,677 DEBUG [RS:0;e335232f7491:39595 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:50:15,677 DEBUG [RS:0;e335232f7491:39595 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:50:15,677 DEBUG [RS:0;e335232f7491:39595 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:50:15,677 DEBUG [RS:0;e335232f7491:39595 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e335232f7491:0, corePoolSize=2, maxPoolSize=2 2024-11-24T16:50:15,678 DEBUG [RS:0;e335232f7491:39595 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:50:15,678 DEBUG [RS:0;e335232f7491:39595 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:50:15,678 DEBUG [RS:0;e335232f7491:39595 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e335232f7491:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T16:50:15,678 DEBUG [RS:0;e335232f7491:39595 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:50:15,679 DEBUG [RS:0;e335232f7491:39595 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:50:15,679 DEBUG [RS:0;e335232f7491:39595 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:50:15,679 DEBUG [RS:0;e335232f7491:39595 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e335232f7491:0, corePoolSize=3, maxPoolSize=3 2024-11-24T16:50:15,679 DEBUG [RS:0;e335232f7491:39595 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0, corePoolSize=3, maxPoolSize=3 2024-11-24T16:50:15,680 INFO [RS:0;e335232f7491:39595 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T16:50:15,681 INFO [RS:0;e335232f7491:39595 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T16:50:15,681 INFO [RS:0;e335232f7491:39595 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:50:15,681 INFO [RS:0;e335232f7491:39595 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T16:50:15,681 INFO [RS:0;e335232f7491:39595 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T16:50:15,681 INFO [RS:0;e335232f7491:39595 {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,39595,1732467014577-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T16:50:15,700 INFO [RS:0;e335232f7491:39595 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T16:50:15,702 INFO [RS:0;e335232f7491:39595 {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,39595,1732467014577-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:50:15,702 INFO [RS:0;e335232f7491:39595 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:50:15,703 INFO [RS:0;e335232f7491:39595 {}] regionserver.Replication(171): e335232f7491,39595,1732467014577 started 2024-11-24T16:50:15,721 INFO [RS:0;e335232f7491:39595 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
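The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. the mark sits at 95% of the limit. The sketch below only checks that arithmetic; tying the 0.95 factor to hbase.regionserver.global.memstore.size.lower.limit is an assumption about defaults, not something this log states.

    // Sketch: verify the low-water-mark relationship implied by the log line above.
    public class MemStoreLowMarkCheck {
        public static void main(String[] args) {
            long limitMb = 880;                           // globalMemStoreLimit from the log
            long lowMarkMb = Math.round(limitMb * 0.95);  // assumed 0.95 lower-limit factor
            System.out.println(lowMarkMb);                // prints 836, matching the log
        }
    }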
2024-11-24T16:50:15,721 INFO [RS:0;e335232f7491:39595 {}] regionserver.HRegionServer(1482): Serving as e335232f7491,39595,1732467014577, RpcServer on e335232f7491/172.17.0.2:39595, sessionid=0x1014ae2a04b0001 2024-11-24T16:50:15,722 DEBUG [RS:0;e335232f7491:39595 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T16:50:15,722 DEBUG [RS:0;e335232f7491:39595 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e335232f7491,39595,1732467014577 2024-11-24T16:50:15,722 DEBUG [RS:0;e335232f7491:39595 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e335232f7491,39595,1732467014577' 2024-11-24T16:50:15,722 DEBUG [RS:0;e335232f7491:39595 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T16:50:15,723 DEBUG [RS:0;e335232f7491:39595 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T16:50:15,724 DEBUG [RS:0;e335232f7491:39595 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T16:50:15,724 DEBUG [RS:0;e335232f7491:39595 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T16:50:15,724 DEBUG [RS:0;e335232f7491:39595 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e335232f7491,39595,1732467014577 2024-11-24T16:50:15,724 DEBUG [RS:0;e335232f7491:39595 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e335232f7491,39595,1732467014577' 2024-11-24T16:50:15,724 DEBUG [RS:0;e335232f7491:39595 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T16:50:15,725 DEBUG [RS:0;e335232f7491:39595 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T16:50:15,726 DEBUG [RS:0;e335232f7491:39595 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T16:50:15,726 INFO [RS:0;e335232f7491:39595 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T16:50:15,726 INFO [RS:0;e335232f7491:39595 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T16:50:15,751 WARN [e335232f7491:36731 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-24T16:50:15,834 INFO [RS:0;e335232f7491:39595 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C39595%2C1732467014577, suffix=, logDir=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577, archiveDir=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/oldWALs, maxLogs=32 2024-11-24T16:50:15,837 INFO [RS:0;e335232f7491:39595 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C39595%2C1732467014577.1732467015837 2024-11-24T16:50:15,846 INFO [RS:0;e335232f7491:39595 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467015837 2024-11-24T16:50:15,847 DEBUG [RS:0;e335232f7491:39595 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36531:36531),(127.0.0.1/127.0.0.1:40043:40043)] 2024-11-24T16:50:16,004 DEBUG [e335232f7491:36731 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T16:50:16,016 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e335232f7491,39595,1732467014577 2024-11-24T16:50:16,022 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e335232f7491,39595,1732467014577, state=OPENING 2024-11-24T16:50:16,028 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T16:50:16,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:50:16,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:50:16,031 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:50:16,031 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:50:16,032 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T16:50:16,034 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e335232f7491,39595,1732467014577}] 2024-11-24T16:50:16,209 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T16:50:16,212 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48135, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T16:50:16,223 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T16:50:16,224 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:50:16,227 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C39595%2C1732467014577.meta, suffix=.meta, logDir=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577, archiveDir=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/oldWALs, maxLogs=32 2024-11-24T16:50:16,229 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C39595%2C1732467014577.meta.1732467016229.meta 2024-11-24T16:50:16,237 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.meta.1732467016229.meta 2024-11-24T16:50:16,238 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40043:40043),(127.0.0.1/127.0.0.1:36531:36531)] 2024-11-24T16:50:16,239 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:50:16,241 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T16:50:16,243 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T16:50:16,248 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-24T16:50:16,253 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T16:50:16,253 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:50:16,254 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T16:50:16,254 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T16:50:16,257 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T16:50:16,258 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T16:50:16,258 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:50:16,259 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:50:16,259 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T16:50:16,261 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T16:50:16,261 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:50:16,262 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:50:16,262 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T16:50:16,263 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T16:50:16,263 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:50:16,264 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:50:16,264 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T16:50:16,265 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T16:50:16,265 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:50:16,266 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T16:50:16,266 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T16:50:16,267 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740 2024-11-24T16:50:16,270 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740 2024-11-24T16:50:16,272 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T16:50:16,272 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T16:50:16,273 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T16:50:16,275 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T16:50:16,277 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=852915, jitterRate=0.08453834056854248}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T16:50:16,277 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T16:50:16,278 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732467016254Writing region info on filesystem at 1732467016254Initializing all the Stores at 1732467016256 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467016256Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467016257 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467016257Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467016257Cleaning up temporary data from old regions at 1732467016272 (+15 ms)Running coprocessor post-open hooks at 1732467016277 (+5 ms)Region opened successfully at 1732467016278 (+1 ms) 2024-11-24T16:50:16,285 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732467016200 2024-11-24T16:50:16,296 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T16:50:16,297 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T16:50:16,298 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e335232f7491,39595,1732467014577 2024-11-24T16:50:16,300 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e335232f7491,39595,1732467014577, state=OPEN 2024-11-24T16:50:16,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T16:50:16,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T16:50:16,305 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:50:16,305 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:50:16,306 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e335232f7491,39595,1732467014577 2024-11-24T16:50:16,311 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T16:50:16,311 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e335232f7491,39595,1732467014577 in 272 msec 2024-11-24T16:50:16,317 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T16:50:16,317 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 725 msec 2024-11-24T16:50:16,318 DEBUG [PEWorker-3 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:50:16,318 INFO [PEWorker-3 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T16:50:16,339 DEBUG [PEWorker-3 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T16:50:16,341 DEBUG [PEWorker-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e335232f7491,39595,1732467014577, seqNum=-1] 2024-11-24T16:50:16,361 DEBUG [PEWorker-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T16:50:16,364 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54519, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T16:50:16,384 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 971 msec 2024-11-24T16:50:16,384 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732467016384, completionTime=-1 2024-11-24T16:50:16,386 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T16:50:16,387 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T16:50:16,413 INFO [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T16:50:16,413 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732467076413 2024-11-24T16:50:16,413 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732467136413 2024-11-24T16:50:16,413 INFO [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 26 msec 2024-11-24T16:50:16,416 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,36731,1732467013895-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:50:16,416 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,36731,1732467013895-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:50:16,416 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,36731,1732467013895-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:50:16,418 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e335232f7491:36731, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T16:50:16,418 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T16:50:16,419 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T16:50:16,425 DEBUG [master/e335232f7491:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T16:50:16,446 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.729sec 2024-11-24T16:50:16,447 INFO [master/e335232f7491:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T16:50:16,448 INFO [master/e335232f7491:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T16:50:16,449 INFO [master/e335232f7491:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T16:50:16,450 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T16:50:16,450 INFO [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T16:50:16,450 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,36731,1732467013895-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T16:50:16,451 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,36731,1732467013895-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T16:50:16,459 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T16:50:16,460 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T16:50:16,460 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,36731,1732467013895-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T16:50:16,478 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4731d90b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:50:16,480 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-24T16:50:16,480 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-24T16:50:16,484 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e335232f7491,36731,-1 for getting cluster id 2024-11-24T16:50:16,486 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T16:50:16,495 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '08c9fde8-6691-47cc-bb65-13aabba7967b' 2024-11-24T16:50:16,497 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T16:50:16,497 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "08c9fde8-6691-47cc-bb65-13aabba7967b" 2024-11-24T16:50:16,500 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e53afd8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:50:16,500 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e335232f7491,36731,-1] 2024-11-24T16:50:16,502 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T16:50:16,504 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:50:16,505 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37160, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T16:50:16,508 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2eb3700d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:50:16,508 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T16:50:16,515 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e335232f7491,39595,1732467014577, seqNum=-1] 2024-11-24T16:50:16,516 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T16:50:16,518 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47506, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T16:50:16,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=e335232f7491,36731,1732467013895 2024-11-24T16:50:16,561 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:50:16,572 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T16:50:16,577 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T16:50:16,587 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is e335232f7491,36731,1732467013895 2024-11-24T16:50:16,590 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@40eeddb 2024-11-24T16:50:16,592 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T16:50:16,594 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37162, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T16:50:16,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36731 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T16:50:16,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36731 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-24T16:50:16,601 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36731 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T16:50:16,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36731 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-24T16:50:16,612 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T16:50:16,614 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36731 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-24T16:50:16,614 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:50:16,616 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T16:50:16,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36731 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T16:50:16,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741835_1011 (size=389) 2024-11-24T16:50:16,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741835_1011 (size=389) 2024-11-24T16:50:16,676 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 44e297aebabd4ff0f28596bef634f880, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b 2024-11-24T16:50:16,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741836_1012 (size=72) 2024-11-24T16:50:16,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741836_1012 (size=72) 2024-11-24T16:50:16,687 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:50:16,688 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 44e297aebabd4ff0f28596bef634f880, disabling compactions & flushes 2024-11-24T16:50:16,688 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880. 2024-11-24T16:50:16,688 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880. 2024-11-24T16:50:16,688 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880. after waiting 0 ms 2024-11-24T16:50:16,688 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880. 2024-11-24T16:50:16,688 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880. 2024-11-24T16:50:16,688 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 44e297aebabd4ff0f28596bef634f880: Waiting for close lock at 1732467016687Disabling compacts and flushes for region at 1732467016687Disabling writes for close at 1732467016688 (+1 ms)Writing region close event to WAL at 1732467016688Closed at 1732467016688 2024-11-24T16:50:16,690 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T16:50:16,695 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732467016690"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732467016690"}]},"ts":"1732467016690"} 2024-11-24T16:50:16,700 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-24T16:50:16,702 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T16:50:16,706 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732467016702"}]},"ts":"1732467016702"} 2024-11-24T16:50:16,710 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-24T16:50:16,712 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=44e297aebabd4ff0f28596bef634f880, ASSIGN}] 2024-11-24T16:50:16,714 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=44e297aebabd4ff0f28596bef634f880, ASSIGN 2024-11-24T16:50:16,716 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=44e297aebabd4ff0f28596bef634f880, ASSIGN; state=OFFLINE, location=e335232f7491,39595,1732467014577; forceNewPlan=false, retain=false 2024-11-24T16:50:16,868 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=44e297aebabd4ff0f28596bef634f880, regionState=OPENING, regionLocation=e335232f7491,39595,1732467014577 2024-11-24T16:50:16,873 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=44e297aebabd4ff0f28596bef634f880, ASSIGN because future has completed 2024-11-24T16:50:16,874 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 44e297aebabd4ff0f28596bef634f880, server=e335232f7491,39595,1732467014577}] 2024-11-24T16:50:17,035 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880. 
2024-11-24T16:50:17,035 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 44e297aebabd4ff0f28596bef634f880, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880.', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:50:17,036 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 44e297aebabd4ff0f28596bef634f880 2024-11-24T16:50:17,036 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:50:17,036 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 44e297aebabd4ff0f28596bef634f880 2024-11-24T16:50:17,036 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 44e297aebabd4ff0f28596bef634f880 2024-11-24T16:50:17,038 INFO [StoreOpener-44e297aebabd4ff0f28596bef634f880-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 44e297aebabd4ff0f28596bef634f880 2024-11-24T16:50:17,041 INFO [StoreOpener-44e297aebabd4ff0f28596bef634f880-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 44e297aebabd4ff0f28596bef634f880 columnFamilyName info 2024-11-24T16:50:17,041 DEBUG [StoreOpener-44e297aebabd4ff0f28596bef634f880-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:50:17,043 INFO [StoreOpener-44e297aebabd4ff0f28596bef634f880-1 {}] regionserver.HStore(327): Store=44e297aebabd4ff0f28596bef634f880/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:50:17,043 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 44e297aebabd4ff0f28596bef634f880 2024-11-24T16:50:17,044 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880 2024-11-24T16:50:17,045 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880 2024-11-24T16:50:17,045 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 44e297aebabd4ff0f28596bef634f880 2024-11-24T16:50:17,045 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 44e297aebabd4ff0f28596bef634f880 2024-11-24T16:50:17,048 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 44e297aebabd4ff0f28596bef634f880 2024-11-24T16:50:17,051 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:50:17,052 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 44e297aebabd4ff0f28596bef634f880; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=798185, jitterRate=0.014945372939109802}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T16:50:17,052 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 44e297aebabd4ff0f28596bef634f880 2024-11-24T16:50:17,053 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 44e297aebabd4ff0f28596bef634f880: Running coprocessor pre-open hook at 1732467017036Writing region info on filesystem at 1732467017036Initializing all the Stores at 1732467017038 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467017038Cleaning up temporary data from old regions at 1732467017045 (+7 ms)Running coprocessor post-open hooks at 1732467017052 (+7 ms)Region opened successfully at 1732467017053 (+1 ms) 2024-11-24T16:50:17,055 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880., pid=6, masterSystemTime=1732467017028 2024-11-24T16:50:17,060 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880. 2024-11-24T16:50:17,060 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880. 2024-11-24T16:50:17,061 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=44e297aebabd4ff0f28596bef634f880, regionState=OPEN, openSeqNum=2, regionLocation=e335232f7491,39595,1732467014577 2024-11-24T16:50:17,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 44e297aebabd4ff0f28596bef634f880, server=e335232f7491,39595,1732467014577 because future has completed 2024-11-24T16:50:17,071 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T16:50:17,071 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 44e297aebabd4ff0f28596bef634f880, server=e335232f7491,39595,1732467014577 in 193 msec 2024-11-24T16:50:17,075 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T16:50:17,075 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=44e297aebabd4ff0f28596bef634f880, ASSIGN in 359 msec 2024-11-24T16:50:17,077 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T16:50:17,077 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732467017077"}]},"ts":"1732467017077"} 2024-11-24T16:50:17,080 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-24T16:50:17,082 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T16:50:17,086 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 478 msec 2024-11-24T16:50:21,756 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-24T16:50:21,809 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T16:50:21,810 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-24T16:50:24,302 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T16:50:24,302 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T16:50:24,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-24T16:50:24,304 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-24T16:50:24,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T16:50:24,305 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T16:50:24,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T16:50:24,305 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-24T16:50:26,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36731 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T16:50:26,653 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-24T16:50:26,656 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-24T16:50:26,663 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-24T16:50:26,664 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880. 
2024-11-24T16:50:26,665 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C39595%2C1732467014577.1732467026665 2024-11-24T16:50:26,673 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:50:26,674 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:50:26,674 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:50:26,674 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:50:26,674 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:50:26,675 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467015837 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467026665 2024-11-24T16:50:26,676 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36531:36531),(127.0.0.1/127.0.0.1:40043:40043)] 2024-11-24T16:50:26,676 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467015837 is not closed yet, will try archiving it next time 2024-11-24T16:50:26,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741833_1009 (size=451) 2024-11-24T16:50:26,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741833_1009 (size=451) 2024-11-24T16:50:26,679 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467015837 to hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/oldWALs/e335232f7491%2C39595%2C1732467014577.1732467015837 2024-11-24T16:50:26,684 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880., hostname=e335232f7491,39595,1732467014577, seqNum=2] 2024-11-24T16:50:38,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39595 {}] regionserver.HRegion(8855): Flush requested on 44e297aebabd4ff0f28596bef634f880 2024-11-24T16:50:38,718 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 44e297aebabd4ff0f28596bef634f880 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T16:50:38,789 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/.tmp/info/3def986369f84a54ad452a57fe12aa7e is 1080, key is row0001/info:/1732467026686/Put/seqid=0 2024-11-24T16:50:38,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741838_1014 (size=12509) 2024-11-24T16:50:38,800 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741838_1014 (size=12509) 2024-11-24T16:50:38,801 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/.tmp/info/3def986369f84a54ad452a57fe12aa7e 2024-11-24T16:50:38,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/.tmp/info/3def986369f84a54ad452a57fe12aa7e as hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/3def986369f84a54ad452a57fe12aa7e 2024-11-24T16:50:38,855 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/3def986369f84a54ad452a57fe12aa7e, entries=7, sequenceid=11, filesize=12.2 K 2024-11-24T16:50:38,861 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 44e297aebabd4ff0f28596bef634f880 in 144ms, sequenceid=11, compaction requested=false 2024-11-24T16:50:38,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 44e297aebabd4ff0f28596bef634f880: 2024-11-24T16:50:42,985 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
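The flush above follows the two-step pattern visible in the paths: the new HFile is first written under the region's .tmp directory and only then committed into the info store by a rename, so readers never see a partially written file. A minimal sketch of that commit step with the plain Hadoop FileSystem API -- the paths below are placeholders, not the actual test paths:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenCommitSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);   // local FS here; HDFS in the test run
        // Placeholder layout mirroring the log: flush output lands under .tmp first.
        Path tmpFile = new Path("/tmp/region-sketch/.tmp/info/flushed-hfile");
        Path storeFile = new Path("/tmp/region-sketch/info/flushed-hfile");
        fs.mkdirs(storeFile.getParent());
        fs.create(tmpFile, true).close();       // stand-in for writing the flushed HFile
        // The file only becomes visible to readers once it is renamed into the store directory.
        if (!fs.rename(tmpFile, storeFile)) {
          throw new IOException("failed to commit " + tmpFile + " to " + storeFile);
        }
        System.out.println("committed " + storeFile);
      }
    }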
2024-11-24T16:50:46,730 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C39595%2C1732467014577.1732467046729 2024-11-24T16:50:46,942 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 210 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK], DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK]] 2024-11-24T16:50:46,943 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:50:46,943 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:50:46,943 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:50:46,943 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:50:46,943 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:50:46,944 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467026665 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467046729 2024-11-24T16:50:46,945 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40043:40043),(127.0.0.1/127.0.0.1:36531:36531)] 2024-11-24T16:50:46,946 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467026665 is not closed yet, will try archiving it next time 2024-11-24T16:50:46,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741837_1013 (size=12399) 2024-11-24T16:50:46,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741837_1013 (size=12399) 2024-11-24T16:50:47,154 INFO [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(1368): Slow sync cost: 204 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK], DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK]] 2024-11-24T16:50:49,358 INFO [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK], DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK]] 2024-11-24T16:50:51,563 INFO [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK], DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK]] 2024-11-24T16:50:53,768 INFO [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK], DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK]] 2024-11-24T16:50:53,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39595 {}] regionserver.HRegion(8855): Flush requested on 44e297aebabd4ff0f28596bef634f880 2024-11-24T16:50:53,768 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 44e297aebabd4ff0f28596bef634f880 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T16:50:53,970 INFO [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK], DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK]] 2024-11-24T16:50:53,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/.tmp/info/65de1b1fe5564ac3a042150f98e91c4b is 1080, key is row0008/info:/1732467040717/Put/seqid=0 2024-11-24T16:50:53,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741840_1016 (size=12509) 2024-11-24T16:50:53,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741840_1016 (size=12509) 2024-11-24T16:50:53,991 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/.tmp/info/65de1b1fe5564ac3a042150f98e91c4b 2024-11-24T16:50:54,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/.tmp/info/65de1b1fe5564ac3a042150f98e91c4b as hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/65de1b1fe5564ac3a042150f98e91c4b 2024-11-24T16:50:54,013 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/65de1b1fe5564ac3a042150f98e91c4b, entries=7, sequenceid=21, filesize=12.2 K 2024-11-24T16:50:54,215 INFO [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK], DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK]] 2024-11-24T16:50:54,215 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 44e297aebabd4ff0f28596bef634f880 in 
447ms, sequenceid=21, compaction requested=false 2024-11-24T16:50:54,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 44e297aebabd4ff0f28596bef634f880: 2024-11-24T16:50:54,216 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-24T16:50:54,216 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:50:54,217 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/3def986369f84a54ad452a57fe12aa7e because midkey is the same as first or last row 2024-11-24T16:50:55,973 INFO [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK], DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK]] 2024-11-24T16:50:57,407 INFO [master/e335232f7491:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T16:50:57,407 INFO [master/e335232f7491:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T16:50:58,177 INFO [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK], DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK]] 2024-11-24T16:50:58,178 WARN [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK], DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK]] 2024-11-24T16:50:58,179 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e335232f7491%2C39595%2C1732467014577:(num 1732467046729) roll requested 2024-11-24T16:50:58,180 INFO [regionserver/e335232f7491:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C39595%2C1732467014577.1732467058180 2024-11-24T16:50:58,388 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK], DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK]] 2024-11-24T16:50:58,388 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:50:58,388 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:50:58,388 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:50:58,388 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:50:58,388 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
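The roll request above fires because eight consecutive syncs exceeded the per-sync slow threshold while the roll trigger is five (count=8, threshold=5). The logic amounts to a windowed counter; below is a simplified, generic sketch of that accounting, not HBase's actual implementation, and the threshold values in main() are made-up defaults rather than the cluster's configuration:

    import java.util.concurrent.TimeUnit;

    /** Simplified, illustrative slow-sync accounting; not HBase's actual implementation. */
    public class SlowSyncRollSketch {
      private final long slowSyncNs;       // a single sync slower than this counts as "slow"
      private final int rollThreshold;     // this many slow syncs in one window requests a roll
      private final long windowNs;         // counting window
      private long windowStartNs = System.nanoTime();
      private int slowSyncCount = 0;

      public SlowSyncRollSketch(long slowSyncMs, int rollThreshold, long windowMs) {
        this.slowSyncNs = TimeUnit.MILLISECONDS.toNanos(slowSyncMs);
        this.rollThreshold = rollThreshold;
        this.windowNs = TimeUnit.MILLISECONDS.toNanos(windowMs);
      }

      /** Record one sync; returns true when the caller should request a WAL roll. */
      public synchronized boolean onSync(long syncCostMs) {
        long now = System.nanoTime();
        if (now - windowStartNs > windowNs) {   // start a fresh counting window
          windowStartNs = now;
          slowSyncCount = 0;
        }
        if (TimeUnit.MILLISECONDS.toNanos(syncCostMs) >= slowSyncNs) {
          slowSyncCount++;
        }
        return slowSyncCount > rollThreshold;
      }

      public static void main(String[] args) {
        SlowSyncRollSketch policy = new SlowSyncRollSketch(100, 5, 60_000);
        boolean roll = false;
        for (int i = 0; i < 8; i++) {       // eight ~200 ms syncs, mirroring count=8 in the log
          roll = policy.onSync(201);
        }
        System.out.println("request roll? " + roll);   // true once the count exceeds 5
      }
    }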
2024-11-24T16:50:58,389 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467046729 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467058180 2024-11-24T16:50:58,390 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36531:36531),(127.0.0.1/127.0.0.1:40043:40043)] 2024-11-24T16:50:58,390 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467046729 is not closed yet, will try archiving it next time 2024-11-24T16:50:58,390 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467026665 to hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/oldWALs/e335232f7491%2C39595%2C1732467014577.1732467026665 2024-11-24T16:50:58,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741839_1015 (size=7739) 2024-11-24T16:50:58,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741839_1015 (size=7739) 2024-11-24T16:51:00,381 INFO [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK], DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK]] 2024-11-24T16:51:02,036 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 44e297aebabd4ff0f28596bef634f880, had cached 0 bytes from a total of 25018 2024-11-24T16:51:02,586 INFO [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK], DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK]] 2024-11-24T16:51:04,790 INFO [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK], DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK]] 2024-11-24T16:51:06,993 INFO [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK], 
DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK]] 2024-11-24T16:51:08,995 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T16:51:08,996 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C39595%2C1732467014577.1732467068996 2024-11-24T16:51:12,985 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T16:51:14,004 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK], DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK]] 2024-11-24T16:51:14,006 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK], DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK]] 2024-11-24T16:51:14,006 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e335232f7491%2C39595%2C1732467014577:(num 1732467068996) roll requested 2024-11-24T16:51:14,007 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:14,007 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:14,007 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:14,007 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:14,007 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:14,008 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467058180 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467068996 2024-11-24T16:51:14,009 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40043:40043),(127.0.0.1/127.0.0.1:36531:36531)] 2024-11-24T16:51:14,009 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467058180 is not closed yet, will try archiving it next time 2024-11-24T16:51:14,009 INFO [regionserver/e335232f7491:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C39595%2C1732467014577.1732467074009 2024-11-24T16:51:14,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741841_1017 (size=4753) 2024-11-24T16:51:14,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741841_1017 (size=4753) 2024-11-24T16:51:19,012 INFO [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK], DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK]] 2024-11-24T16:51:19,012 WARN [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK], DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK]] 2024-11-24T16:51:19,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39595 {}] regionserver.HRegion(8855): Flush requested on 44e297aebabd4ff0f28596bef634f880 2024-11-24T16:51:19,013 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 44e297aebabd4ff0f28596bef634f880 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T16:51:19,022 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK], DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK]] 2024-11-24T16:51:19,022 WARN [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK], DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK]] 2024-11-24T16:51:21,013 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T16:51:24,015 INFO [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK], DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK]] 2024-11-24T16:51:24,015 WARN [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK], DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK]] 2024-11-24T16:51:24,015 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:24,015 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:24,015 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:24,015 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:24,016 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:24,016 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467068996 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467074009 2024-11-24T16:51:24,017 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36531:36531),(127.0.0.1/127.0.0.1:40043:40043)] 2024-11-24T16:51:24,017 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467068996 is not closed yet, will try archiving it next time 2024-11-24T16:51:24,017 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e335232f7491%2C39595%2C1732467014577:(num 1732467074009) roll requested 2024-11-24T16:51:24,018 INFO [regionserver/e335232f7491:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C39595%2C1732467014577.1732467084018 2024-11-24T16:51:24,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741842_1018 (size=1569) 2024-11-24T16:51:24,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741842_1018 (size=1569) 2024-11-24T16:51:24,021 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/.tmp/info/5640aed7097a4a5b803ead0b606dec3a is 1080, key is row0015/info:/1732467055771/Put/seqid=0 2024-11-24T16:51:24,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741844_1020 (size=12509) 2024-11-24T16:51:24,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741844_1020 (size=12509) 2024-11-24T16:51:24,028 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/.tmp/info/5640aed7097a4a5b803ead0b606dec3a 2024-11-24T16:51:24,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/.tmp/info/5640aed7097a4a5b803ead0b606dec3a as hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/5640aed7097a4a5b803ead0b606dec3a 2024-11-24T16:51:24,048 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/5640aed7097a4a5b803ead0b606dec3a, entries=7, sequenceid=31, filesize=12.2 K 2024-11-24T16:51:29,027 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK], DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK]] 2024-11-24T16:51:29,027 WARN [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK], DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK]] 2024-11-24T16:51:29,049 INFO [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK], DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK]] 2024-11-24T16:51:29,049 WARN [FSHLog-0-hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b-prefix:e335232f7491,39595,1732467014577 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33785,DS-228fca95-a711-4772-b7d9-fa822ba4d913,DISK], DatanodeInfoWithStorage[127.0.0.1:34837,DS-422f6137-c5df-4bd0-92ec-35f82afcf6f5,DISK]] 2024-11-24T16:51:29,049 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 44e297aebabd4ff0f28596bef634f880 in 10037ms, sequenceid=31, compaction requested=true 2024-11-24T16:51:29,049 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:29,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 44e297aebabd4ff0f28596bef634f880: 2024-11-24T16:51:29,050 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:29,050 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-24T16:51:29,050 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:29,050 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:51:29,050 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:29,050 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/3def986369f84a54ad452a57fe12aa7e because midkey is the same as first or last row 2024-11-24T16:51:29,050 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:29,050 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467074009 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467084018 2024-11-24T16:51:29,051 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:40043:40043),(127.0.0.1/127.0.0.1:36531:36531)] 2024-11-24T16:51:29,051 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467074009 is not closed yet, will try archiving it next time 2024-11-24T16:51:29,051 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467046729 to hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/oldWALs/e335232f7491%2C39595%2C1732467014577.1732467046729 2024-11-24T16:51:29,051 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e335232f7491%2C39595%2C1732467014577:(num 1732467089051) roll requested 2024-11-24T16:51:29,052 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C39595%2C1732467014577.1732467089051 2024-11-24T16:51:29,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 44e297aebabd4ff0f28596bef634f880:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T16:51:29,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741843_1019 (size=438) 2024-11-24T16:51:29,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741843_1019 (size=438) 2024-11-24T16:51:29,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:51:29,055 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467058180 to hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/oldWALs/e335232f7491%2C39595%2C1732467014577.1732467058180 2024-11-24T16:51:29,057 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467068996 to hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/oldWALs/e335232f7491%2C39595%2C1732467014577.1732467068996 2024-11-24T16:51:29,057 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T16:51:29,059 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467074009 to hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/oldWALs/e335232f7491%2C39595%2C1732467014577.1732467074009 2024-11-24T16:51:29,060 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations 
with 1 in ratio 2024-11-24T16:51:29,061 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:29,061 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:29,061 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:29,061 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:29,061 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:29,062 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467084018 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467089051 2024-11-24T16:51:29,062 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] regionserver.HStore(1541): 44e297aebabd4ff0f28596bef634f880/info is initiating minor compaction (all files) 2024-11-24T16:51:29,063 INFO [RS:0;e335232f7491:39595-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 44e297aebabd4ff0f28596bef634f880/info in TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880. 2024-11-24T16:51:29,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741845_1021 (size=93) 2024-11-24T16:51:29,063 INFO [RS:0;e335232f7491:39595-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/3def986369f84a54ad452a57fe12aa7e, hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/65de1b1fe5564ac3a042150f98e91c4b, hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/5640aed7097a4a5b803ead0b606dec3a] into tmpdir=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/.tmp, totalSize=36.6 K 2024-11-24T16:51:29,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741845_1021 (size=93) 2024-11-24T16:51:29,065 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40043:40043),(127.0.0.1/127.0.0.1:36531:36531)] 2024-11-24T16:51:29,065 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467084018 to hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/oldWALs/e335232f7491%2C39595%2C1732467014577.1732467084018 2024-11-24T16:51:29,065 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3def986369f84a54ad452a57fe12aa7e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732467026686 2024-11-24T16:51:29,065 INFO [regionserver/e335232f7491:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow 
monitor e335232f7491%2C39595%2C1732467014577.1732467089065 2024-11-24T16:51:29,066 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] compactions.Compactor(225): Compacting 65de1b1fe5564ac3a042150f98e91c4b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732467040717 2024-11-24T16:51:29,067 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5640aed7097a4a5b803ead0b606dec3a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732467055771 2024-11-24T16:51:29,072 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:29,072 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:29,072 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:29,072 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:29,072 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:29,073 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467089051 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467089065 2024-11-24T16:51:29,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741846_1022 (size=1258) 2024-11-24T16:51:29,075 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40043:40043),(127.0.0.1/127.0.0.1:36531:36531)] 2024-11-24T16:51:29,075 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/WALs/e335232f7491,39595,1732467014577/e335232f7491%2C39595%2C1732467014577.1732467089051 is not closed yet, will try archiving it next time 2024-11-24T16:51:29,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741846_1022 (size=1258) 2024-11-24T16:51:29,099 INFO [RS:0;e335232f7491:39595-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 44e297aebabd4ff0f28596bef634f880#info#compaction#3 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T16:51:29,100 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/.tmp/info/52172f063994453b8b46892ff86a7697 is 1080, key is row0001/info:/1732467026686/Put/seqid=0 2024-11-24T16:51:29,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741848_1024 (size=27710) 2024-11-24T16:51:29,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741848_1024 (size=27710) 2024-11-24T16:51:29,117 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/.tmp/info/52172f063994453b8b46892ff86a7697 as hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/52172f063994453b8b46892ff86a7697 2024-11-24T16:51:29,134 INFO [RS:0;e335232f7491:39595-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 44e297aebabd4ff0f28596bef634f880/info of 44e297aebabd4ff0f28596bef634f880 into 52172f063994453b8b46892ff86a7697(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T16:51:29,135 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 44e297aebabd4ff0f28596bef634f880: 2024-11-24T16:51:29,136 INFO [RS:0;e335232f7491:39595-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880., storeName=44e297aebabd4ff0f28596bef634f880/info, priority=13, startTime=1732467089051; duration=0sec 2024-11-24T16:51:29,137 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-24T16:51:29,137 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:51:29,137 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/52172f063994453b8b46892ff86a7697 because midkey is the same as first or last row 2024-11-24T16:51:29,137 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-24T16:51:29,137 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:51:29,137 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/52172f063994453b8b46892ff86a7697 because midkey is the same as first or last row 2024-11-24T16:51:29,137 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-24T16:51:29,138 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:51:29,138 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/52172f063994453b8b46892ff86a7697 because midkey is the same as first or last row 2024-11-24T16:51:29,138 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:51:29,138 DEBUG [RS:0;e335232f7491:39595-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 44e297aebabd4ff0f28596bef634f880:info 2024-11-24T16:51:41,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39595 {}] regionserver.HRegion(8855): Flush requested on 44e297aebabd4ff0f28596bef634f880 2024-11-24T16:51:41,091 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 44e297aebabd4ff0f28596bef634f880 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T16:51:41,098 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/.tmp/info/de10db81fb494dfd9600357d7567c1e9 is 1080, key is row0022/info:/1732467089067/Put/seqid=0 2024-11-24T16:51:41,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741849_1025 (size=12509) 2024-11-24T16:51:41,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741849_1025 (size=12509) 2024-11-24T16:51:41,105 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/.tmp/info/de10db81fb494dfd9600357d7567c1e9 2024-11-24T16:51:41,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/.tmp/info/de10db81fb494dfd9600357d7567c1e9 as hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/de10db81fb494dfd9600357d7567c1e9 2024-11-24T16:51:41,124 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/de10db81fb494dfd9600357d7567c1e9, entries=7, sequenceid=42, filesize=12.2 K 2024-11-24T16:51:41,126 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 44e297aebabd4ff0f28596bef634f880 in 35ms, sequenceid=42, compaction requested=false 2024-11-24T16:51:41,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 44e297aebabd4ff0f28596bef634f880: 2024-11-24T16:51:41,126 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-24T16:51:41,126 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:51:41,126 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/52172f063994453b8b46892ff86a7697 because midkey is the same as first or last row 2024-11-24T16:51:42,986 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T16:51:47,037 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 44e297aebabd4ff0f28596bef634f880, had cached 0 bytes from a total of 40219 2024-11-24T16:51:49,102 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T16:51:49,102 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
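Each flush above ends with the same split-policy probe: the summed store size (by now 39.3 K) is over the 16.0 K check size, yet the split is refused because the candidate midkey of the dominant store file equals its first or last row, which would leave one daughter region empty. A stripped-down sketch of those two checks -- method names and the byte[] key representation are mine, not HBase's code:

    import java.util.Arrays;

    /** Illustrative version of the two split checks seen in the log; not HBase's implementation. */
    public class SplitCheckSketch {
      /** Size gate: split only when the store files together exceed the check size. */
      static boolean shouldSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
        return sumStoreSizeBytes > sizeToCheckBytes;
      }

      /** Midkey gate: refuse to split when the midkey equals the first or last key,
          since one daughter region would end up with no rows. */
      static byte[] chooseSplitPoint(byte[] midKey, byte[] firstKey, byte[] lastKey) {
        if (midKey == null
            || Arrays.equals(midKey, firstKey)
            || Arrays.equals(midKey, lastKey)) {
          return null; // "cannot split ... because midkey is the same as first or last row"
        }
        return midKey;
      }

      public static void main(String[] args) {
        System.out.println(shouldSplit(40_243, 16_384));                  // true: size gate passes
        byte[] k = "row0001".getBytes();
        System.out.println(chooseSplitPoint(k, k, "row0029".getBytes())); // null: split refused
      }
    }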
2024-11-24T16:51:49,102 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:51:49,107 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:51:49,108 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:51:49,108 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-24T16:51:49,108 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T16:51:49,108 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1718951232, stopped=false 2024-11-24T16:51:49,109 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e335232f7491,36731,1732467013895 2024-11-24T16:51:49,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T16:51:49,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T16:51:49,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:49,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:49,111 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T16:51:49,111 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T16:51:49,111 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:51:49,112 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:51:49,112 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:51:49,112 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:51:49,112 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e335232f7491,39595,1732467014577' ***** 2024-11-24T16:51:49,112 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T16:51:49,112 INFO [RS:0;e335232f7491:39595 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T16:51:49,113 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T16:51:49,113 INFO [RS:0;e335232f7491:39595 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T16:51:49,113 INFO [RS:0;e335232f7491:39595 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T16:51:49,113 INFO [RS:0;e335232f7491:39595 {}] regionserver.HRegionServer(3091): Received CLOSE for 44e297aebabd4ff0f28596bef634f880 2024-11-24T16:51:49,114 INFO [RS:0;e335232f7491:39595 {}] regionserver.HRegionServer(959): stopping server e335232f7491,39595,1732467014577 2024-11-24T16:51:49,114 INFO [RS:0;e335232f7491:39595 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T16:51:49,114 INFO [RS:0;e335232f7491:39595 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e335232f7491:39595. 
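The shutdown above is coordinated through ZooKeeper: deleting the /hbase/running znode produces the NodeDeleted events that both the master (port 36731) and the region server (port 39595) watchers receive just before the STOPPING messages. A bare-bones sketch of watching that znode with the plain ZooKeeper client -- the session timeout and keep-alive sleep are placeholders; only the quorum string 127.0.0.1:53097 and the znode path come from the log:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Quorum string as logged by the test; timeout is an arbitrary placeholder.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:53097", 30_000, event -> { });
        // Set a one-shot watch on /hbase/running; its deletion signals cluster shutdown.
        zk.exists("/hbase/running", (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
            System.out.println("cluster shutdown requested: " + event.getPath());
          }
        });
        Thread.sleep(60_000); // keep the process alive long enough to observe the event
        zk.close();
      }
    }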
2024-11-24T16:51:49,114 DEBUG [RS:0;e335232f7491:39595 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:51:49,114 DEBUG [RS:0;e335232f7491:39595 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:51:49,114 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 44e297aebabd4ff0f28596bef634f880, disabling compactions & flushes 2024-11-24T16:51:49,114 INFO [RS:0;e335232f7491:39595 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T16:51:49,114 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880. 2024-11-24T16:51:49,114 INFO [RS:0;e335232f7491:39595 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T16:51:49,115 INFO [RS:0;e335232f7491:39595 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T16:51:49,114 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880. 2024-11-24T16:51:49,115 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880. after waiting 0 ms 2024-11-24T16:51:49,115 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880. 
2024-11-24T16:51:49,115 INFO [RS:0;e335232f7491:39595 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T16:51:49,115 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 44e297aebabd4ff0f28596bef634f880 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-24T16:51:49,115 INFO [RS:0;e335232f7491:39595 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T16:51:49,115 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T16:51:49,115 DEBUG [RS:0;e335232f7491:39595 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 44e297aebabd4ff0f28596bef634f880=TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880.} 2024-11-24T16:51:49,115 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T16:51:49,115 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T16:51:49,115 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T16:51:49,115 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T16:51:49,116 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-24T16:51:49,116 DEBUG [RS:0;e335232f7491:39595 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 44e297aebabd4ff0f28596bef634f880 2024-11-24T16:51:49,121 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/.tmp/info/3a693ac721af4ef9b67c453f66434177 is 1080, key is row0029/info:/1732467103093/Put/seqid=0 2024-11-24T16:51:49,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741850_1026 (size=8193) 2024-11-24T16:51:49,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741850_1026 (size=8193) 2024-11-24T16:51:49,129 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/.tmp/info/3a693ac721af4ef9b67c453f66434177 2024-11-24T16:51:49,138 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/.tmp/info/3a693ac721af4ef9b67c453f66434177 as hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/3a693ac721af4ef9b67c453f66434177 2024-11-24T16:51:49,142 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740/.tmp/info/0ab49256640e465485672e7ebd64278a is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880./info:regioninfo/1732467017060/Put/seqid=0 2024-11-24T16:51:49,147 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/3a693ac721af4ef9b67c453f66434177, entries=3, sequenceid=48, filesize=8.0 K 2024-11-24T16:51:49,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741851_1027 (size=7016) 2024-11-24T16:51:49,149 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 44e297aebabd4ff0f28596bef634f880 in 34ms, sequenceid=48, compaction requested=true 2024-11-24T16:51:49,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741851_1027 (size=7016) 2024-11-24T16:51:49,150 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/3def986369f84a54ad452a57fe12aa7e, hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/65de1b1fe5564ac3a042150f98e91c4b, hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/5640aed7097a4a5b803ead0b606dec3a] to archive 2024-11-24T16:51:49,152 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740/.tmp/info/0ab49256640e465485672e7ebd64278a 2024-11-24T16:51:49,153 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-24T16:51:49,156 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/3def986369f84a54ad452a57fe12aa7e to hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/archive/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/3def986369f84a54ad452a57fe12aa7e 2024-11-24T16:51:49,158 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/65de1b1fe5564ac3a042150f98e91c4b to hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/archive/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/65de1b1fe5564ac3a042150f98e91c4b 2024-11-24T16:51:49,160 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/5640aed7097a4a5b803ead0b606dec3a to hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/archive/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/info/5640aed7097a4a5b803ead0b606dec3a 2024-11-24T16:51:49,172 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e335232f7491:36731 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-24T16:51:49,177 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [3def986369f84a54ad452a57fe12aa7e=12509, 65de1b1fe5564ac3a042150f98e91c4b=12509, 5640aed7097a4a5b803ead0b606dec3a=12509] 2024-11-24T16:51:49,179 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740/.tmp/ns/15b64bee781f4187ac68cc6c150709d3 is 43, key is default/ns:d/1732467016368/Put/seqid=0 2024-11-24T16:51:49,184 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/default/TestLogRolling-testSlowSyncLogRolling/44e297aebabd4ff0f28596bef634f880/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-24T16:51:49,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741852_1028 (size=5153) 2024-11-24T16:51:49,187 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880. 2024-11-24T16:51:49,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741852_1028 (size=5153) 2024-11-24T16:51:49,187 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 44e297aebabd4ff0f28596bef634f880: Waiting for close lock at 1732467109114Running coprocessor pre-close hooks at 1732467109114Disabling compacts and flushes for region at 1732467109114Disabling writes for close at 1732467109115 (+1 ms)Obtaining lock to block concurrent updates at 1732467109115Preparing flush snapshotting stores in 44e297aebabd4ff0f28596bef634f880 at 1732467109115Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732467109115Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880. 
at 1732467109116 (+1 ms)Flushing 44e297aebabd4ff0f28596bef634f880/info: creating writer at 1732467109116Flushing 44e297aebabd4ff0f28596bef634f880/info: appending metadata at 1732467109120 (+4 ms)Flushing 44e297aebabd4ff0f28596bef634f880/info: closing flushed file at 1732467109120Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67fb5b8e: reopening flushed file at 1732467109137 (+17 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 44e297aebabd4ff0f28596bef634f880 in 34ms, sequenceid=48, compaction requested=true at 1732467109149 (+12 ms)Writing region close event to WAL at 1732467109178 (+29 ms)Running coprocessor post-close hooks at 1732467109185 (+7 ms)Closed at 1732467109187 (+2 ms) 2024-11-24T16:51:49,187 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740/.tmp/ns/15b64bee781f4187ac68cc6c150709d3 2024-11-24T16:51:49,188 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732467016596.44e297aebabd4ff0f28596bef634f880. 2024-11-24T16:51:49,210 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740/.tmp/table/68121e9b1f944573a88fb5ef799aada9 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732467017077/Put/seqid=0 2024-11-24T16:51:49,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741853_1029 (size=5396) 2024-11-24T16:51:49,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741853_1029 (size=5396) 2024-11-24T16:51:49,217 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740/.tmp/table/68121e9b1f944573a88fb5ef799aada9 2024-11-24T16:51:49,225 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740/.tmp/info/0ab49256640e465485672e7ebd64278a as hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740/info/0ab49256640e465485672e7ebd64278a 2024-11-24T16:51:49,234 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740/info/0ab49256640e465485672e7ebd64278a, entries=10, sequenceid=11, filesize=6.9 K 2024-11-24T16:51:49,236 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740/.tmp/ns/15b64bee781f4187ac68cc6c150709d3 as hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740/ns/15b64bee781f4187ac68cc6c150709d3 2024-11-24T16:51:49,245 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740/ns/15b64bee781f4187ac68cc6c150709d3, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T16:51:49,247 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740/.tmp/table/68121e9b1f944573a88fb5ef799aada9 as hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740/table/68121e9b1f944573a88fb5ef799aada9 2024-11-24T16:51:49,255 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740/table/68121e9b1f944573a88fb5ef799aada9, entries=2, sequenceid=11, filesize=5.3 K 2024-11-24T16:51:49,257 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 142ms, sequenceid=11, compaction requested=false 2024-11-24T16:51:49,263 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T16:51:49,264 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T16:51:49,264 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T16:51:49,264 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732467109115Running coprocessor pre-close hooks at 1732467109115Disabling compacts and flushes for region at 1732467109115Disabling writes for close at 1732467109115Obtaining lock to block concurrent updates at 1732467109116 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732467109116Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732467109116Flushing stores of hbase:meta,,1.1588230740 at 1732467109117 (+1 ms)Flushing 1588230740/info: creating writer at 1732467109117Flushing 1588230740/info: appending metadata at 1732467109141 (+24 ms)Flushing 1588230740/info: closing flushed file at 1732467109141Flushing 1588230740/ns: creating writer at 1732467109162 (+21 ms)Flushing 1588230740/ns: appending metadata at 1732467109178 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732467109178Flushing 
1588230740/table: creating writer at 1732467109194 (+16 ms)Flushing 1588230740/table: appending metadata at 1732467109210 (+16 ms)Flushing 1588230740/table: closing flushed file at 1732467109210Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34d7b00e: reopening flushed file at 1732467109224 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f82317d: reopening flushed file at 1732467109234 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f224bd6: reopening flushed file at 1732467109245 (+11 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 142ms, sequenceid=11, compaction requested=false at 1732467109257 (+12 ms)Writing region close event to WAL at 1732467109258 (+1 ms)Running coprocessor post-close hooks at 1732467109264 (+6 ms)Closed at 1732467109264 2024-11-24T16:51:49,265 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T16:51:49,316 INFO [RS:0;e335232f7491:39595 {}] regionserver.HRegionServer(976): stopping server e335232f7491,39595,1732467014577; all regions closed. 2024-11-24T16:51:49,318 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:49,318 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:49,318 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:49,318 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:49,318 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:49,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741834_1010 (size=3066) 2024-11-24T16:51:49,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741834_1010 (size=3066) 2024-11-24T16:51:49,326 DEBUG [RS:0;e335232f7491:39595 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/oldWALs 2024-11-24T16:51:49,326 INFO [RS:0;e335232f7491:39595 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e335232f7491%2C39595%2C1732467014577.meta:.meta(num 1732467016229) 2024-11-24T16:51:49,326 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:49,326 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:49,327 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:49,327 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:49,327 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:49,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741847_1023 (size=12695) 2024-11-24T16:51:49,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741847_1023 (size=12695) 2024-11-24T16:51:49,333 DEBUG [RS:0;e335232f7491:39595 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/oldWALs 2024-11-24T16:51:49,333 INFO [RS:0;e335232f7491:39595 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e335232f7491%2C39595%2C1732467014577:(num 1732467089065) 2024-11-24T16:51:49,333 DEBUG [RS:0;e335232f7491:39595 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:51:49,333 
INFO [RS:0;e335232f7491:39595 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T16:51:49,333 INFO [RS:0;e335232f7491:39595 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T16:51:49,334 INFO [RS:0;e335232f7491:39595 {}] hbase.ChoreService(370): Chore service for: regionserver/e335232f7491:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T16:51:49,334 INFO [RS:0;e335232f7491:39595 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T16:51:49,334 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T16:51:49,334 INFO [RS:0;e335232f7491:39595 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39595 2024-11-24T16:51:49,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e335232f7491,39595,1732467014577 2024-11-24T16:51:49,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T16:51:49,339 INFO [RS:0;e335232f7491:39595 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T16:51:49,341 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e335232f7491,39595,1732467014577] 2024-11-24T16:51:49,344 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e335232f7491,39595,1732467014577 already deleted, retry=false 2024-11-24T16:51:49,344 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e335232f7491,39595,1732467014577 expired; onlineServers=0 2024-11-24T16:51:49,344 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e335232f7491,36731,1732467013895' ***** 2024-11-24T16:51:49,344 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T16:51:49,344 INFO [M:0;e335232f7491:36731 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T16:51:49,344 INFO [M:0;e335232f7491:36731 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T16:51:49,345 DEBUG [M:0;e335232f7491:36731 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T16:51:49,345 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T16:51:49,345 DEBUG [M:0;e335232f7491:36731 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T16:51:49,345 DEBUG [master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467015515 {}] cleaner.HFileCleaner(306): Exit Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467015515,5,FailOnTimeoutGroup] 2024-11-24T16:51:49,345 DEBUG [master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467015514 {}] cleaner.HFileCleaner(306): Exit Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467015514,5,FailOnTimeoutGroup] 2024-11-24T16:51:49,345 INFO [M:0;e335232f7491:36731 {}] hbase.ChoreService(370): Chore service for: master/e335232f7491:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T16:51:49,345 INFO [M:0;e335232f7491:36731 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T16:51:49,345 DEBUG [M:0;e335232f7491:36731 {}] master.HMaster(1795): Stopping service threads 2024-11-24T16:51:49,345 INFO [M:0;e335232f7491:36731 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T16:51:49,345 INFO [M:0;e335232f7491:36731 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T16:51:49,346 INFO [M:0;e335232f7491:36731 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T16:51:49,346 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T16:51:49,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T16:51:49,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:49,347 DEBUG [M:0;e335232f7491:36731 {}] zookeeper.ZKUtil(347): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T16:51:49,347 WARN [M:0;e335232f7491:36731 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T16:51:49,348 INFO [M:0;e335232f7491:36731 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/.lastflushedseqids 2024-11-24T16:51:49,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741854_1030 (size=130) 2024-11-24T16:51:49,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741854_1030 (size=130) 2024-11-24T16:51:49,363 INFO [M:0;e335232f7491:36731 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T16:51:49,363 INFO [M:0;e335232f7491:36731 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T16:51:49,363 DEBUG [M:0;e335232f7491:36731 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T16:51:49,363 INFO [M:0;e335232f7491:36731 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:51:49,363 DEBUG [M:0;e335232f7491:36731 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:51:49,363 DEBUG [M:0;e335232f7491:36731 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T16:51:49,363 DEBUG [M:0;e335232f7491:36731 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:51:49,364 INFO [M:0;e335232f7491:36731 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-24T16:51:49,388 DEBUG [M:0;e335232f7491:36731 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/21a05813d8f840a1bd546f3172320bd2 is 82, key is hbase:meta,,1/info:regioninfo/1732467016298/Put/seqid=0 2024-11-24T16:51:49,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741855_1031 (size=5672) 2024-11-24T16:51:49,401 INFO [M:0;e335232f7491:36731 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/21a05813d8f840a1bd546f3172320bd2 2024-11-24T16:51:49,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741855_1031 (size=5672) 2024-11-24T16:51:49,429 DEBUG [M:0;e335232f7491:36731 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2da9e72758a5448d95aab70d868e46e3 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732467017085/Put/seqid=0 2024-11-24T16:51:49,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741856_1032 (size=6247) 2024-11-24T16:51:49,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741856_1032 (size=6247) 2024-11-24T16:51:49,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:51:49,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39595-0x1014ae2a04b0001, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:51:49,442 INFO [RS:0;e335232f7491:39595 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T16:51:49,442 INFO [RS:0;e335232f7491:39595 {}] regionserver.HRegionServer(1031): Exiting; stopping=e335232f7491,39595,1732467014577; zookeeper connection closed. 
2024-11-24T16:51:49,443 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@373cb160 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@373cb160 2024-11-24T16:51:49,443 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T16:51:49,686 INFO [regionserver/e335232f7491:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T16:51:49,839 INFO [M:0;e335232f7491:36731 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2da9e72758a5448d95aab70d868e46e3 2024-11-24T16:51:49,848 INFO [M:0;e335232f7491:36731 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2da9e72758a5448d95aab70d868e46e3 2024-11-24T16:51:49,870 DEBUG [M:0;e335232f7491:36731 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b4f4fc56a05d490fbd11928342334ed9 is 69, key is e335232f7491,39595,1732467014577/rs:state/1732467015604/Put/seqid=0 2024-11-24T16:51:49,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741857_1033 (size=5156) 2024-11-24T16:51:49,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741857_1033 (size=5156) 2024-11-24T16:51:49,892 INFO [M:0;e335232f7491:36731 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b4f4fc56a05d490fbd11928342334ed9 2024-11-24T16:51:49,918 DEBUG [M:0;e335232f7491:36731 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e29b3048ba774d9f80ca61d5f0f9fa38 is 52, key is load_balancer_on/state:d/1732467016567/Put/seqid=0 2024-11-24T16:51:49,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741858_1034 (size=5056) 2024-11-24T16:51:49,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741858_1034 (size=5056) 2024-11-24T16:51:49,927 INFO [M:0;e335232f7491:36731 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e29b3048ba774d9f80ca61d5f0f9fa38 2024-11-24T16:51:49,936 DEBUG [M:0;e335232f7491:36731 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/21a05813d8f840a1bd546f3172320bd2 as 
hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/21a05813d8f840a1bd546f3172320bd2 2024-11-24T16:51:49,944 INFO [M:0;e335232f7491:36731 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/21a05813d8f840a1bd546f3172320bd2, entries=8, sequenceid=59, filesize=5.5 K 2024-11-24T16:51:49,945 DEBUG [M:0;e335232f7491:36731 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2da9e72758a5448d95aab70d868e46e3 as hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2da9e72758a5448d95aab70d868e46e3 2024-11-24T16:51:49,952 INFO [M:0;e335232f7491:36731 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2da9e72758a5448d95aab70d868e46e3 2024-11-24T16:51:49,953 INFO [M:0;e335232f7491:36731 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2da9e72758a5448d95aab70d868e46e3, entries=6, sequenceid=59, filesize=6.1 K 2024-11-24T16:51:49,954 DEBUG [M:0;e335232f7491:36731 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b4f4fc56a05d490fbd11928342334ed9 as hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b4f4fc56a05d490fbd11928342334ed9 2024-11-24T16:51:49,961 INFO [M:0;e335232f7491:36731 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b4f4fc56a05d490fbd11928342334ed9, entries=1, sequenceid=59, filesize=5.0 K 2024-11-24T16:51:49,963 DEBUG [M:0;e335232f7491:36731 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e29b3048ba774d9f80ca61d5f0f9fa38 as hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e29b3048ba774d9f80ca61d5f0f9fa38 2024-11-24T16:51:49,970 INFO [M:0;e335232f7491:36731 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e29b3048ba774d9f80ca61d5f0f9fa38, entries=1, sequenceid=59, filesize=4.9 K 2024-11-24T16:51:49,971 INFO [M:0;e335232f7491:36731 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 608ms, sequenceid=59, compaction requested=false 2024-11-24T16:51:49,973 INFO [M:0;e335232f7491:36731 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T16:51:49,973 DEBUG [M:0;e335232f7491:36731 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732467109363Disabling compacts and flushes for region at 1732467109363Disabling writes for close at 1732467109363Obtaining lock to block concurrent updates at 1732467109364 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732467109364Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1732467109364Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732467109365 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732467109365Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732467109387 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732467109387Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732467109409 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732467109428 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732467109428Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732467109848 (+420 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732467109869 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732467109869Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732467109899 (+30 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732467109917 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732467109917Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@537bb669: reopening flushed file at 1732467109935 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58e7a46f: reopening flushed file at 1732467109944 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@391b1af7: reopening flushed file at 1732467109953 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@678ab40: reopening flushed file at 1732467109962 (+9 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 608ms, sequenceid=59, compaction requested=false at 1732467109972 (+10 ms)Writing region close event to WAL at 1732467109973 (+1 ms)Closed at 1732467109973 2024-11-24T16:51:49,974 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:49,974 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:49,974 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:49,975 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:49,975 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:49,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33785 is added to blk_1073741830_1006 (size=27973) 2024-11-24T16:51:49,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34837 is added to blk_1073741830_1006 (size=27973) 2024-11-24T16:51:49,978 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T16:51:49,978 INFO [M:0;e335232f7491:36731 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T16:51:49,978 INFO [M:0;e335232f7491:36731 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36731 2024-11-24T16:51:49,978 INFO [M:0;e335232f7491:36731 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T16:51:50,080 INFO [M:0;e335232f7491:36731 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T16:51:50,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:51:50,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36731-0x1014ae2a04b0000, quorum=127.0.0.1:53097, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:51:50,085 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:51:50,087 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:51:50,087 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:51:50,087 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:51:50,088 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/hadoop.log.dir/,STOPPED} 2024-11-24T16:51:50,090 WARN [BP-1592658052-172.17.0.2-1732467011005 heartbeating to localhost/127.0.0.1:36099 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:51:50,090 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T16:51:50,090 WARN [BP-1592658052-172.17.0.2-1732467011005 heartbeating to localhost/127.0.0.1:36099 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1592658052-172.17.0.2-1732467011005 (Datanode Uuid 46021085-9e33-4037-9df8-f8852bffa368) service to localhost/127.0.0.1:36099 2024-11-24T16:51:50,090 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:51:50,092 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/cluster_5b8f1de5-3aca-f4bc-4834-dbdf2fcf5465/data/data3/current/BP-1592658052-172.17.0.2-1732467011005 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:51:50,092 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/cluster_5b8f1de5-3aca-f4bc-4834-dbdf2fcf5465/data/data4/current/BP-1592658052-172.17.0.2-1732467011005 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:51:50,093 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:51:50,095 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:51:50,095 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:51:50,095 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:51:50,095 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:51:50,096 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/hadoop.log.dir/,STOPPED} 2024-11-24T16:51:50,098 WARN [BP-1592658052-172.17.0.2-1732467011005 heartbeating to localhost/127.0.0.1:36099 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:51:50,098 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T16:51:50,099 WARN [BP-1592658052-172.17.0.2-1732467011005 heartbeating to localhost/127.0.0.1:36099 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1592658052-172.17.0.2-1732467011005 (Datanode Uuid c2a2ece3-fc78-4c9d-b94a-8a86518f6f03) service to localhost/127.0.0.1:36099 2024-11-24T16:51:50,099 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:51:50,099 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/cluster_5b8f1de5-3aca-f4bc-4834-dbdf2fcf5465/data/data1/current/BP-1592658052-172.17.0.2-1732467011005 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:51:50,099 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/cluster_5b8f1de5-3aca-f4bc-4834-dbdf2fcf5465/data/data2/current/BP-1592658052-172.17.0.2-1732467011005 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:51:50,100 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:51:50,109 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T16:51:50,110 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:51:50,110 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:51:50,110 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:51:50,110 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/hadoop.log.dir/,STOPPED} 2024-11-24T16:51:50,119 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T16:51:50,151 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T16:51:50,161 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36099 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36099 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36099 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/e335232f7491:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36099 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36099 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/e335232f7491:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:36099 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@3e12dea0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36099 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36099 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
regionserver/e335232f7491:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=75 (was 201), ProcessCount=11 (was 11), AvailableMemoryMB=6960 (was 6834) - AvailableMemoryMB LEAK? - 2024-11-24T16:51:50,168 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=75, ProcessCount=11, AvailableMemoryMB=6959 2024-11-24T16:51:50,168 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T16:51:50,169 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/hadoop.log.dir so I do NOT create it in target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee 2024-11-24T16:51:50,169 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a346dff-efff-c212-c414-b06ca171d0b9/hadoop.tmp.dir so I do NOT create it in target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee 2024-11-24T16:51:50,169 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/cluster_5fbb8f11-7b1d-27c0-26f0-353903b8d76c, deleteOnExit=true 2024-11-24T16:51:50,169 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T16:51:50,169 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/test.cache.data in system properties and HBase conf 2024-11-24T16:51:50,169 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T16:51:50,169 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/hadoop.log.dir in system properties and HBase conf 2024-11-24T16:51:50,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T16:51:50,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T16:51:50,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T16:51:50,170 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-24T16:51:50,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T16:51:50,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T16:51:50,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T16:51:50,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T16:51:50,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T16:51:50,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T16:51:50,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T16:51:50,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T16:51:50,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T16:51:50,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/nfs.dump.dir in system properties and HBase conf 2024-11-24T16:51:50,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/java.io.tmpdir in system properties and HBase conf 2024-11-24T16:51:50,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T16:51:50,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T16:51:50,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T16:51:50,186 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T16:51:50,264 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:51:50,270 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:51:50,273 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:51:50,273 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:51:50,273 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:51:50,274 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:51:50,274 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a69944b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:51:50,275 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ae773f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:51:50,391 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1dc8c743{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/java.io.tmpdir/jetty-localhost-45321-hadoop-hdfs-3_4_1-tests_jar-_-any-11927946343206555412/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T16:51:50,392 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c4717a0{HTTP/1.1, (http/1.1)}{localhost:45321} 2024-11-24T16:51:50,392 INFO [Time-limited test {}] server.Server(415): Started @101354ms 2024-11-24T16:51:50,406 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T16:51:50,482 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:51:50,486 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:51:50,487 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:51:50,487 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:51:50,487 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:51:50,487 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@670e4080{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:51:50,488 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7247ee1d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:51:50,605 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6a249094{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/java.io.tmpdir/jetty-localhost-46479-hadoop-hdfs-3_4_1-tests_jar-_-any-6681686463805479801/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:51:50,605 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@35445cbd{HTTP/1.1, (http/1.1)}{localhost:46479} 2024-11-24T16:51:50,605 INFO [Time-limited test {}] server.Server(415): Started @101567ms 2024-11-24T16:51:50,607 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T16:51:50,654 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:51:50,658 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:51:50,658 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:51:50,659 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:51:50,659 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:51:50,659 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18f27499{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:51:50,660 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64e5ce98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:51:50,736 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/cluster_5fbb8f11-7b1d-27c0-26f0-353903b8d76c/data/data2/current/BP-1485803164-172.17.0.2-1732467110205/current, will proceed with Du for space computation calculation, 2024-11-24T16:51:50,736 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/cluster_5fbb8f11-7b1d-27c0-26f0-353903b8d76c/data/data1/current/BP-1485803164-172.17.0.2-1732467110205/current, will proceed with Du for space computation calculation, 2024-11-24T16:51:50,761 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T16:51:50,764 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xba95ccac3798453f with lease ID 0x7417c6c4a4f0afa1: Processing first storage report for DS-5d2e4846-9a53-487d-9017-ea911a83614e from datanode DatanodeRegistration(127.0.0.1:46151, datanodeUuid=d05fe914-e1e2-4389-8e6e-c195b1c7c8c7, infoPort=37943, infoSecurePort=0, ipcPort=43639, storageInfo=lv=-57;cid=testClusterID;nsid=1083716889;c=1732467110205) 2024-11-24T16:51:50,764 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xba95ccac3798453f with lease ID 0x7417c6c4a4f0afa1: from storage DS-5d2e4846-9a53-487d-9017-ea911a83614e node DatanodeRegistration(127.0.0.1:46151, datanodeUuid=d05fe914-e1e2-4389-8e6e-c195b1c7c8c7, infoPort=37943, infoSecurePort=0, ipcPort=43639, storageInfo=lv=-57;cid=testClusterID;nsid=1083716889;c=1732467110205), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:51:50,764 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xba95ccac3798453f with lease ID 0x7417c6c4a4f0afa1: Processing first storage report for DS-1e0459b9-6d42-4ec2-930b-cce214c45948 from datanode DatanodeRegistration(127.0.0.1:46151, datanodeUuid=d05fe914-e1e2-4389-8e6e-c195b1c7c8c7, infoPort=37943, infoSecurePort=0, ipcPort=43639, storageInfo=lv=-57;cid=testClusterID;nsid=1083716889;c=1732467110205) 2024-11-24T16:51:50,764 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xba95ccac3798453f with lease ID 0x7417c6c4a4f0afa1: from storage DS-1e0459b9-6d42-4ec2-930b-cce214c45948 node DatanodeRegistration(127.0.0.1:46151, datanodeUuid=d05fe914-e1e2-4389-8e6e-c195b1c7c8c7, infoPort=37943, infoSecurePort=0, ipcPort=43639, storageInfo=lv=-57;cid=testClusterID;nsid=1083716889;c=1732467110205), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:51:50,785 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@254b7745{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/java.io.tmpdir/jetty-localhost-44919-hadoop-hdfs-3_4_1-tests_jar-_-any-7692742544161562796/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:51:50,786 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@86e294e{HTTP/1.1, (http/1.1)}{localhost:44919} 2024-11-24T16:51:50,786 INFO [Time-limited test {}] server.Server(415): Started @101748ms 2024-11-24T16:51:50,788 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
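[Editor's note] The entries above (the BlockPoolSlice "dfsUsed file missing" warnings and the first datanode's block report) come from the mini HDFS cluster the test harness brings up under the test-data directory. A minimal sketch of standing up a comparable two-datanode mini cluster directly with Hadoop's test API follows; it assumes the hadoop-hdfs test artifact is on the classpath, and the base directory value is purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Illustrative base dir; the harness in this log derives it from the test-data path.
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-sketch");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)   // matches the two datanodes reporting blocks in the log
        .build();
    cluster.waitActive(); // block until the namenode has registered both datanodes
    FileSystem fs = cluster.getFileSystem();
    fs.mkdirs(new Path("/user/jenkins"));
    cluster.shutdown();
  }
}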
2024-11-24T16:51:50,905 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/cluster_5fbb8f11-7b1d-27c0-26f0-353903b8d76c/data/data3/current/BP-1485803164-172.17.0.2-1732467110205/current, will proceed with Du for space computation calculation, 2024-11-24T16:51:50,905 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/cluster_5fbb8f11-7b1d-27c0-26f0-353903b8d76c/data/data4/current/BP-1485803164-172.17.0.2-1732467110205/current, will proceed with Du for space computation calculation, 2024-11-24T16:51:50,925 WARN [Thread-452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T16:51:50,927 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x718aec5f10f77a29 with lease ID 0x7417c6c4a4f0afa2: Processing first storage report for DS-64b96cfd-bfa5-46c0-a564-3c02224e1954 from datanode DatanodeRegistration(127.0.0.1:36611, datanodeUuid=a08036a4-6b45-40d2-b954-bc08ac0d7dba, infoPort=46541, infoSecurePort=0, ipcPort=35359, storageInfo=lv=-57;cid=testClusterID;nsid=1083716889;c=1732467110205) 2024-11-24T16:51:50,928 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x718aec5f10f77a29 with lease ID 0x7417c6c4a4f0afa2: from storage DS-64b96cfd-bfa5-46c0-a564-3c02224e1954 node DatanodeRegistration(127.0.0.1:36611, datanodeUuid=a08036a4-6b45-40d2-b954-bc08ac0d7dba, infoPort=46541, infoSecurePort=0, ipcPort=35359, storageInfo=lv=-57;cid=testClusterID;nsid=1083716889;c=1732467110205), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:51:50,928 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x718aec5f10f77a29 with lease ID 0x7417c6c4a4f0afa2: Processing first storage report for DS-7154e9e9-4c69-430f-83ae-5d6aecfacce0 from datanode DatanodeRegistration(127.0.0.1:36611, datanodeUuid=a08036a4-6b45-40d2-b954-bc08ac0d7dba, infoPort=46541, infoSecurePort=0, ipcPort=35359, storageInfo=lv=-57;cid=testClusterID;nsid=1083716889;c=1732467110205) 2024-11-24T16:51:50,928 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x718aec5f10f77a29 with lease ID 0x7417c6c4a4f0afa2: from storage DS-7154e9e9-4c69-430f-83ae-5d6aecfacce0 node DatanodeRegistration(127.0.0.1:36611, datanodeUuid=a08036a4-6b45-40d2-b954-bc08ac0d7dba, infoPort=46541, infoSecurePort=0, ipcPort=35359, storageInfo=lv=-57;cid=testClusterID;nsid=1083716889;c=1732467110205), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:51:51,019 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee 2024-11-24T16:51:51,023 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/cluster_5fbb8f11-7b1d-27c0-26f0-353903b8d76c/zookeeper_0, clientPort=51845, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/cluster_5fbb8f11-7b1d-27c0-26f0-353903b8d76c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/cluster_5fbb8f11-7b1d-27c0-26f0-353903b8d76c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T16:51:51,024 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51845 2024-11-24T16:51:51,024 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:51:51,026 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:51:51,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46151 is added to blk_1073741825_1001 (size=7) 2024-11-24T16:51:51,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36611 is added to blk_1073741825_1001 (size=7) 2024-11-24T16:51:51,044 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d with version=8 2024-11-24T16:51:51,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/hbase-staging 2024-11-24T16:51:51,047 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e335232f7491:0 server-side Connection retries=45 2024-11-24T16:51:51,047 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:51:51,047 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T16:51:51,047 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T16:51:51,048 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:51:51,048 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T16:51:51,048 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T16:51:51,048 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T16:51:51,049 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45439 2024-11-24T16:51:51,050 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45439 connecting to ZooKeeper ensemble=127.0.0.1:51845 2024-11-24T16:51:51,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:454390x0, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T16:51:51,058 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45439-0x1014ae41efd0000 connected 2024-11-24T16:51:51,084 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:51:51,087 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:51:51,090 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:51:51,090 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d, hbase.cluster.distributed=false 2024-11-24T16:51:51,091 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T16:51:51,092 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45439 2024-11-24T16:51:51,092 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45439 2024-11-24T16:51:51,092 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45439 2024-11-24T16:51:51,093 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45439 2024-11-24T16:51:51,093 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45439 2024-11-24T16:51:51,110 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e335232f7491:0 server-side Connection retries=45 2024-11-24T16:51:51,110 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:51:51,110 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T16:51:51,110 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T16:51:51,110 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:51:51,111 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T16:51:51,111 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T16:51:51,111 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T16:51:51,111 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33385 2024-11-24T16:51:51,113 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33385 connecting to ZooKeeper ensemble=127.0.0.1:51845 2024-11-24T16:51:51,114 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:51:51,116 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:51:51,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:333850x0, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T16:51:51,122 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:51:51,122 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33385-0x1014ae41efd0001 connected 2024-11-24T16:51:51,122 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T16:51:51,123 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T16:51:51,124 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T16:51:51,125 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T16:51:51,126 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33385 2024-11-24T16:51:51,126 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33385 2024-11-24T16:51:51,130 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33385 2024-11-24T16:51:51,131 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33385 2024-11-24T16:51:51,131 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33385 
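[Editor's note] The "Setting hbase.rootdir ...", "Started MiniZooKeeperCluster ... client port=51845" and master/region-server RPC setup entries above are what the test utility produces when a test asks for a full mini cluster. A rough sketch of driving that from test code is below. The log uses the HBaseTestingUtil class from the master branch; the sketch uses the long-standing HBaseTestingUtility API, so treat the exact class name and signatures as assumptions to adapt to the branch in use.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Starts mini DFS, a MiniZooKeeperCluster and one master plus one region server,
    // producing startup output very similar to the entries above.
    util.startMiniCluster();
    try {
      Table table = util.createTable(TableName.valueOf("sketch"), "info");
      // ... exercise the table ...
      table.close();
    } finally {
      util.shutdownMiniCluster();
    }
  }
}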
2024-11-24T16:51:51,144 DEBUG [M:0;e335232f7491:45439 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e335232f7491:45439 2024-11-24T16:51:51,145 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e335232f7491,45439,1732467111047 2024-11-24T16:51:51,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:51:51,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:51:51,148 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e335232f7491,45439,1732467111047 2024-11-24T16:51:51,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T16:51:51,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:51,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:51,151 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T16:51:51,151 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e335232f7491,45439,1732467111047 from backup master directory 2024-11-24T16:51:51,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:51:51,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e335232f7491,45439,1732467111047 2024-11-24T16:51:51,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:51:51,159 WARN [master/e335232f7491:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
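[Editor's note] The ZKWatcher events above track the znodes the master and region server create under the /hbase base znode (backup-masters, master) on the 127.0.0.1:51845 quorum reported in the log. A small sketch of inspecting those znodes with a plain ZooKeeper client follows; the connect string is taken from the log, while the 30-second session timeout is illustrative.

import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51845", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await(); // wait for the SyncConnected event, as seen in the ZKWatcher lines above
    List<String> children = zk.getChildren("/hbase", false);
    System.out.println("children of /hbase: " + children);
    System.out.println("/hbase/master exists: " + (zk.exists("/hbase/master", false) != null));
    zk.close();
  }
}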
2024-11-24T16:51:51,159 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e335232f7491,45439,1732467111047 2024-11-24T16:51:51,166 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/hbase.id] with ID: 515b96e3-ffa6-41b6-b92a-ce528bf03a68 2024-11-24T16:51:51,166 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/.tmp/hbase.id 2024-11-24T16:51:51,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36611 is added to blk_1073741826_1002 (size=42) 2024-11-24T16:51:51,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46151 is added to blk_1073741826_1002 (size=42) 2024-11-24T16:51:51,175 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/.tmp/hbase.id]:[hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/hbase.id] 2024-11-24T16:51:51,194 INFO [master/e335232f7491:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:51:51,195 INFO [master/e335232f7491:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T16:51:51,199 INFO [master/e335232f7491:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 4ms. 
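[Editor's note] The hbase.id file written above carries the cluster ID (515b96e3-...) that the region server later reports against (the "ClusterId" line near the end of this section). A client can read the same ID and the active/backup masters without touching HDFS, through the Admin API; a hedged sketch, assuming a Connection to this mini cluster already exists:

import java.util.EnumSet;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class ClusterIdSketch {
  // 'connection' is assumed to point at the mini cluster started above.
  static void printClusterIdentity(Connection connection) throws Exception {
    try (Admin admin = connection.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics(
          EnumSet.of(Option.CLUSTER_ID, Option.MASTER, Option.BACKUP_MASTERS));
      System.out.println("cluster id:     " + metrics.getClusterId());
      System.out.println("active master:  " + metrics.getMasterName());
      System.out.println("backup masters: " + metrics.getBackupMasterNames());
    }
  }
}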
2024-11-24T16:51:51,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:51,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:51,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46151 is added to blk_1073741827_1003 (size=196) 2024-11-24T16:51:51,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36611 is added to blk_1073741827_1003 (size=196) 2024-11-24T16:51:51,222 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T16:51:51,223 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T16:51:51,224 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:51:51,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36611 is added to blk_1073741828_1004 (size=1189) 2024-11-24T16:51:51,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46151 is added to blk_1073741828_1004 (size=1189) 2024-11-24T16:51:51,236 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store 2024-11-24T16:51:51,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46151 is added to blk_1073741829_1005 (size=34) 2024-11-24T16:51:51,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36611 is added to blk_1073741829_1005 (size=34) 2024-11-24T16:51:51,252 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:51:51,252 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T16:51:51,252 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:51:51,252 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:51:51,253 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T16:51:51,253 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:51:51,253 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
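[Editor's note] The master:store descriptor above spells out per-family attributes (VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, BLOCKSIZE, IN_MEMORY). The public client API builds descriptors with the same attributes; a minimal sketch reproducing just the 'info' family as logged. The table name here is illustrative, since master:store itself is an internal region and is not created through this client path.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class DescriptorSketch {
  static TableDescriptor buildSketchTable() {
    // Mirrors the 'info' family attributes printed in the master:store descriptor above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setInMemory(true)
        .setBlocksize(8 * 1024)   // "8192 B (8KB)" in the log
        .build();
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("descriptor_sketch"))
        .setColumnFamily(info)
        .build();
  }
}

Passing such a descriptor to Admin#createTable would produce a descriptor dump in the same format as the log lines above.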
2024-11-24T16:51:51,253 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732467111252Disabling compacts and flushes for region at 1732467111252Disabling writes for close at 1732467111253 (+1 ms)Writing region close event to WAL at 1732467111253Closed at 1732467111253 2024-11-24T16:51:51,254 WARN [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/.initializing 2024-11-24T16:51:51,254 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/WALs/e335232f7491,45439,1732467111047 2024-11-24T16:51:51,259 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C45439%2C1732467111047, suffix=, logDir=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/WALs/e335232f7491,45439,1732467111047, archiveDir=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/oldWALs, maxLogs=10 2024-11-24T16:51:51,260 INFO [master/e335232f7491:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C45439%2C1732467111047.1732467111259 2024-11-24T16:51:51,266 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/WALs/e335232f7491,45439,1732467111047/e335232f7491%2C45439%2C1732467111047.1732467111259 2024-11-24T16:51:51,270 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37943:37943),(127.0.0.1/127.0.0.1:46541:46541)] 2024-11-24T16:51:51,277 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:51:51,277 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:51:51,277 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:51,277 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:51,280 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:51,282 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T16:51:51,282 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:51,282 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:51:51,283 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:51,285 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T16:51:51,285 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:51,285 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:51:51,286 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:51,288 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T16:51:51,289 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:51,289 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:51:51,290 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:51,291 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T16:51:51,291 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:51,292 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:51:51,292 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:51,293 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:51,294 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:51,295 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:51,295 DEBUG [master/e335232f7491:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:51,296 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T16:51:51,298 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:51,301 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:51:51,302 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=726398, jitterRate=-0.07633763551712036}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T16:51:51,303 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732467111277Initializing all the Stores at 1732467111279 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467111279Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467111280 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467111280Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467111280Cleaning up temporary data from old regions at 1732467111295 (+15 ms)Region opened successfully at 1732467111303 (+8 ms) 2024-11-24T16:51:51,308 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T16:51:51,313 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ff8f69b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e335232f7491/172.17.0.2:0 2024-11-24T16:51:51,314 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T16:51:51,315 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T16:51:51,315 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T16:51:51,315 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T16:51:51,316 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T16:51:51,316 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T16:51:51,316 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T16:51:51,324 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T16:51:51,325 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T16:51:51,327 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T16:51:51,327 INFO [master/e335232f7491:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T16:51:51,328 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T16:51:51,329 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T16:51:51,330 INFO [master/e335232f7491:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T16:51:51,331 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T16:51:51,336 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T16:51:51,338 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T16:51:51,339 DEBUG 
[master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T16:51:51,342 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T16:51:51,343 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T16:51:51,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T16:51:51,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T16:51:51,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:51,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:51,346 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e335232f7491,45439,1732467111047, sessionid=0x1014ae41efd0000, setting cluster-up flag (Was=false) 2024-11-24T16:51:51,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:51,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:51,356 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T16:51:51,357 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e335232f7491,45439,1732467111047 2024-11-24T16:51:51,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:51,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:51,367 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T16:51:51,368 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e335232f7491,45439,1732467111047 2024-11-24T16:51:51,369 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T16:51:51,371 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T16:51:51,371 INFO [master/e335232f7491:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T16:51:51,371 INFO [master/e335232f7491:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T16:51:51,372 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e335232f7491,45439,1732467111047 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T16:51:51,373 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:51:51,373 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:51:51,373 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:51:51,373 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:51:51,373 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e335232f7491:0, corePoolSize=10, maxPoolSize=10 2024-11-24T16:51:51,373 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:51,373 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e335232f7491:0, corePoolSize=2, maxPoolSize=2 2024-11-24T16:51:51,373 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e335232f7491:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T16:51:51,374 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732467141374 2024-11-24T16:51:51,374 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T16:51:51,375 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T16:51:51,375 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T16:51:51,375 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T16:51:51,375 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T16:51:51,375 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T16:51:51,375 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:51,375 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T16:51:51,375 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T16:51:51,375 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T16:51:51,375 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:51:51,376 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T16:51:51,376 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T16:51:51,376 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T16:51:51,376 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467111376,5,FailOnTimeoutGroup] 2024-11-24T16:51:51,376 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467111376,5,FailOnTimeoutGroup] 2024-11-24T16:51:51,376 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:51,376 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T16:51:51,376 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:51,377 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:51,377 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:51,377 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T16:51:51,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46151 is added to blk_1073741831_1007 (size=1321) 2024-11-24T16:51:51,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36611 is added to blk_1073741831_1007 (size=1321) 2024-11-24T16:51:51,433 INFO [RS:0;e335232f7491:33385 {}] regionserver.HRegionServer(746): ClusterId : 515b96e3-ffa6-41b6-b92a-ce528bf03a68 2024-11-24T16:51:51,434 DEBUG [RS:0;e335232f7491:33385 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T16:51:51,450 DEBUG [RS:0;e335232f7491:33385 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T16:51:51,450 DEBUG [RS:0;e335232f7491:33385 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T16:51:51,465 DEBUG [RS:0;e335232f7491:33385 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T16:51:51,466 DEBUG [RS:0;e335232f7491:33385 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f4e1f25, 
compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e335232f7491/172.17.0.2:0 2024-11-24T16:51:51,480 DEBUG [RS:0;e335232f7491:33385 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e335232f7491:33385 2024-11-24T16:51:51,481 INFO [RS:0;e335232f7491:33385 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T16:51:51,481 INFO [RS:0;e335232f7491:33385 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T16:51:51,481 DEBUG [RS:0;e335232f7491:33385 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-24T16:51:51,482 INFO [RS:0;e335232f7491:33385 {}] regionserver.HRegionServer(2659): reportForDuty to master=e335232f7491,45439,1732467111047 with port=33385, startcode=1732467111110 2024-11-24T16:51:51,483 DEBUG [RS:0;e335232f7491:33385 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T16:51:51,486 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41343, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T16:51:51,486 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45439 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e335232f7491,33385,1732467111110 2024-11-24T16:51:51,487 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45439 {}] master.ServerManager(517): Registering regionserver=e335232f7491,33385,1732467111110 2024-11-24T16:51:51,489 DEBUG [RS:0;e335232f7491:33385 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d 2024-11-24T16:51:51,489 DEBUG [RS:0;e335232f7491:33385 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42105 2024-11-24T16:51:51,489 DEBUG [RS:0;e335232f7491:33385 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T16:51:51,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T16:51:51,507 DEBUG [RS:0;e335232f7491:33385 {}] zookeeper.ZKUtil(111): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e335232f7491,33385,1732467111110 2024-11-24T16:51:51,508 WARN [RS:0;e335232f7491:33385 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
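Note: the ipc.AbstractRpcClient line above reports the effective client socket timeouts for the region server's RPC client (connectTO=10000, readTO=20000, writeTO=60000). A minimal sketch of where such values usually come from, assuming the hbase.ipc.client.socket.timeout.* configuration keys (the key names are an assumption matching the defaults printed here, not something read from this test's config files):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: tuning the RPC client socket timeouts that AbstractRpcClient logs as
    // connectTO/readTO/writeTO. The key names are assumed, not taken from this log.
    public class RpcTimeoutSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.ipc.client.socket.timeout.connect", 10_000); // connectTO
        conf.setInt("hbase.ipc.client.socket.timeout.read", 20_000);    // readTO
        conf.setInt("hbase.ipc.client.socket.timeout.write", 60_000);   // writeTO
        System.out.println("connectTO="
            + conf.getInt("hbase.ipc.client.socket.timeout.connect", -1));
      }
    }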
2024-11-24T16:51:51,508 INFO [RS:0;e335232f7491:33385 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:51:51,508 DEBUG [RS:0;e335232f7491:33385 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/WALs/e335232f7491,33385,1732467111110 2024-11-24T16:51:51,508 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e335232f7491,33385,1732467111110] 2024-11-24T16:51:51,513 INFO [RS:0;e335232f7491:33385 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T16:51:51,518 INFO [RS:0;e335232f7491:33385 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T16:51:51,518 INFO [RS:0;e335232f7491:33385 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T16:51:51,518 INFO [RS:0;e335232f7491:33385 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:51,519 INFO [RS:0;e335232f7491:33385 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T16:51:51,520 INFO [RS:0;e335232f7491:33385 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T16:51:51,520 INFO [RS:0;e335232f7491:33385 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:51,520 DEBUG [RS:0;e335232f7491:33385 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:51,520 DEBUG [RS:0;e335232f7491:33385 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:51,520 DEBUG [RS:0;e335232f7491:33385 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:51,520 DEBUG [RS:0;e335232f7491:33385 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:51,521 DEBUG [RS:0;e335232f7491:33385 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:51,521 DEBUG [RS:0;e335232f7491:33385 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e335232f7491:0, corePoolSize=2, maxPoolSize=2 2024-11-24T16:51:51,521 DEBUG [RS:0;e335232f7491:33385 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:51,521 DEBUG [RS:0;e335232f7491:33385 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:51,521 DEBUG [RS:0;e335232f7491:33385 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e335232f7491:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T16:51:51,521 DEBUG [RS:0;e335232f7491:33385 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:51,521 DEBUG [RS:0;e335232f7491:33385 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:51,521 DEBUG [RS:0;e335232f7491:33385 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:51,521 DEBUG [RS:0;e335232f7491:33385 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e335232f7491:0, corePoolSize=3, maxPoolSize=3 2024-11-24T16:51:51,521 DEBUG [RS:0;e335232f7491:33385 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0, corePoolSize=3, maxPoolSize=3 2024-11-24T16:51:51,525 INFO [RS:0;e335232f7491:33385 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:51,525 INFO [RS:0;e335232f7491:33385 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:51,525 INFO [RS:0;e335232f7491:33385 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:51,525 INFO [RS:0;e335232f7491:33385 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:51,525 INFO [RS:0;e335232f7491:33385 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:51,525 INFO [RS:0;e335232f7491:33385 {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,33385,1732467111110-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T16:51:51,549 INFO [RS:0;e335232f7491:33385 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T16:51:51,549 INFO [RS:0;e335232f7491:33385 {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,33385,1732467111110-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:51,549 INFO [RS:0;e335232f7491:33385 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:51,549 INFO [RS:0;e335232f7491:33385 {}] regionserver.Replication(171): e335232f7491,33385,1732467111110 started 2024-11-24T16:51:51,567 INFO [RS:0;e335232f7491:33385 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
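Note: the regionserver.MemStoreFlusher line above shows globalMemStoreLimit=880 M with a low-water mark of 836 M. A minimal sketch of the arithmetic usually behind those two numbers, assuming the defaults hbase.regionserver.global.memstore.size=0.4 and hbase.regionserver.global.memstore.size.lower.limit=0.95; the 2200 MB heap figure is inferred from 880 M / 0.4 and is an assumption, not a value printed in this log:

    // Sketch: how the MemStoreFlusher limits above are typically derived.
    // Property names and the 2200 MB heap figure are assumptions inferred
    // from the logged values, not read from this test's configuration.
    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        long heapMb = 2200;        // assumed max heap (880 M / 0.4)
        double globalSize = 0.4;   // hbase.regionserver.global.memstore.size (default)
        double lowerLimit = 0.95;  // hbase.regionserver.global.memstore.size.lower.limit (default)
        long limitMb = (long) (heapMb * globalSize);    // -> 880 M, matches the log
        long lowMarkMb = (long) (limitMb * lowerLimit); // -> 836 M, matches the log
        System.out.println("globalMemStoreLimit=" + limitMb + " M, lowMark=" + lowMarkMb + " M");
      }
    }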
2024-11-24T16:51:51,568 INFO [RS:0;e335232f7491:33385 {}] regionserver.HRegionServer(1482): Serving as e335232f7491,33385,1732467111110, RpcServer on e335232f7491/172.17.0.2:33385, sessionid=0x1014ae41efd0001 2024-11-24T16:51:51,568 DEBUG [RS:0;e335232f7491:33385 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T16:51:51,568 DEBUG [RS:0;e335232f7491:33385 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e335232f7491,33385,1732467111110 2024-11-24T16:51:51,568 DEBUG [RS:0;e335232f7491:33385 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e335232f7491,33385,1732467111110' 2024-11-24T16:51:51,568 DEBUG [RS:0;e335232f7491:33385 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T16:51:51,569 DEBUG [RS:0;e335232f7491:33385 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T16:51:51,570 DEBUG [RS:0;e335232f7491:33385 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T16:51:51,570 DEBUG [RS:0;e335232f7491:33385 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T16:51:51,570 DEBUG [RS:0;e335232f7491:33385 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e335232f7491,33385,1732467111110 2024-11-24T16:51:51,570 DEBUG [RS:0;e335232f7491:33385 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e335232f7491,33385,1732467111110' 2024-11-24T16:51:51,570 DEBUG [RS:0;e335232f7491:33385 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T16:51:51,570 DEBUG [RS:0;e335232f7491:33385 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T16:51:51,571 DEBUG [RS:0;e335232f7491:33385 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T16:51:51,571 INFO [RS:0;e335232f7491:33385 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T16:51:51,571 INFO [RS:0;e335232f7491:33385 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
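Note: the last two lines above show both the RPC and space quota managers being skipped because quota support is off in this test. A minimal sketch of turning it on, assuming the hbase.quota.enabled switch (the key name is an assumption, not printed in this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: enabling quota support so RegionServerRpcQuotaManager and
    // RegionServerSpaceQuotaManager start instead of logging "Quota support disabled".
    // The "hbase.quota.enabled" key is an assumption, not taken from this log.
    public class QuotaEnableSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.quota.enabled", true);
        System.out.println("quota enabled=" + conf.getBoolean("hbase.quota.enabled", false));
      }
    }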
2024-11-24T16:51:51,674 INFO [RS:0;e335232f7491:33385 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C33385%2C1732467111110, suffix=, logDir=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/WALs/e335232f7491,33385,1732467111110, archiveDir=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/oldWALs, maxLogs=32 2024-11-24T16:51:51,675 INFO [RS:0;e335232f7491:33385 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C33385%2C1732467111110.1732467111675 2024-11-24T16:51:51,683 INFO [RS:0;e335232f7491:33385 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/WALs/e335232f7491,33385,1732467111110/e335232f7491%2C33385%2C1732467111110.1732467111675 2024-11-24T16:51:51,684 DEBUG [RS:0;e335232f7491:33385 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46541:46541),(127.0.0.1/127.0.0.1:37943:37943)] 2024-11-24T16:51:51,788 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T16:51:51,788 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d 2024-11-24T16:51:51,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36611 is added to blk_1073741833_1009 (size=32) 2024-11-24T16:51:51,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46151 is added to blk_1073741833_1009 (size=32) 2024-11-24T16:51:51,799 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:51:51,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T16:51:51,803 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T16:51:51,803 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:51,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:51:51,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T16:51:51,806 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T16:51:51,806 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:51,806 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:51:51,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T16:51:51,808 
INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T16:51:51,809 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:51,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:51:51,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T16:51:51,811 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T16:51:51,811 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:51,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:51:51,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T16:51:51,813 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/data/hbase/meta/1588230740 2024-11-24T16:51:51,814 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/data/hbase/meta/1588230740 2024-11-24T16:51:51,816 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T16:51:51,816 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 
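Note: the StoreOpener lines above open one store per hbase:meta column family (info, ns, rep_barrier, table), each with ROW_INDEX_V1 block encoding, a ROWCOL bloom filter, in-memory caching and an 8 KB block size. A minimal sketch of declaring a user-table family with the same attributes through the client API; "demo_table" and the reuse of the family name "info" are illustrative, not part of this test:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: a column family mirroring the attributes the meta stores log above
    // (ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks, 3 versions).
    public class MetaLikeFamilySketch {
      public static void main(String[] args) {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .setMaxVersions(3)
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_table"))
            .setColumnFamily(info)
            .build();
        System.out.println(td);
      }
    }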
2024-11-24T16:51:51,817 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T16:51:51,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T16:51:51,821 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:51:51,822 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=745085, jitterRate=-0.05257593095302582}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T16:51:51,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732467111799Initializing all the Stores at 1732467111801 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467111801Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467111801Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467111801Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467111801Cleaning up temporary data from old regions at 1732467111816 (+15 ms)Region opened successfully at 1732467111823 (+7 ms) 2024-11-24T16:51:51,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T16:51:51,823 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T16:51:51,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T16:51:51,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T16:51:51,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T16:51:51,824 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 
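Note: the FlushLargeStoresPolicy line above falls back to memstore-flush-size divided by the number of families because hbase:meta's descriptor does not set hbase.hregion.percolumnfamilyflush.size.lower.bound. A minimal sketch of pinning that bound on a user table's descriptor; the table name and the 16 MB value are illustrative assumptions:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Sketch: setting the per-column-family flush lower bound that
    // FlushLargeStoresPolicy looks for in the table descriptor above.
    // Table name and the 16 MB value are illustrative assumptions.
    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                      String.valueOf(16 * 1024 * 1024)) // 16 MB
            .build();
        System.out.println(
            td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
      }
    }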
2024-11-24T16:51:51,824 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732467111823Disabling compacts and flushes for region at 1732467111823Disabling writes for close at 1732467111823Writing region close event to WAL at 1732467111824 (+1 ms)Closed at 1732467111824 2024-11-24T16:51:51,826 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:51:51,826 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T16:51:51,827 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T16:51:51,829 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T16:51:51,831 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T16:51:51,981 DEBUG [e335232f7491:45439 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T16:51:51,982 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e335232f7491,33385,1732467111110 2024-11-24T16:51:51,984 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e335232f7491,33385,1732467111110, state=OPENING 2024-11-24T16:51:51,985 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T16:51:51,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:51,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:51,988 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:51:51,988 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T16:51:51,988 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e335232f7491,33385,1732467111110}] 2024-11-24T16:51:51,988 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:51:52,142 DEBUG [RSProcedureDispatcher-pool-0 
{}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T16:51:52,144 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37727, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T16:51:52,149 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T16:51:52,149 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:51:52,151 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C33385%2C1732467111110.meta, suffix=.meta, logDir=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/WALs/e335232f7491,33385,1732467111110, archiveDir=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/oldWALs, maxLogs=32 2024-11-24T16:51:52,153 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C33385%2C1732467111110.meta.1732467112152.meta 2024-11-24T16:51:52,159 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/WALs/e335232f7491,33385,1732467111110/e335232f7491%2C33385%2C1732467111110.meta.1732467112152.meta 2024-11-24T16:51:52,160 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46541:46541),(127.0.0.1/127.0.0.1:37943:37943)] 2024-11-24T16:51:52,161 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:51:52,161 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T16:51:52,161 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T16:51:52,161 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
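Note: the RegionCoprocessorHost lines above show the coprocessor declared in the hbase:meta descriptor, MultiRowMutationEndpoint, being loaded as the region opens. A minimal sketch of attaching the same endpoint to a user table; "demo_table" is an illustrative name and the connection settings are assumed to come from an hbase-site.xml on the classpath:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Sketch: declaring MultiRowMutationEndpoint on a table descriptor, the same
    // way the hbase:meta descriptor above carries it.
    public class CoprocessorSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("demo_table"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
              .build();
          admin.createTable(td);
        }
      }
    }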
2024-11-24T16:51:52,161 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T16:51:52,161 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:51:52,162 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T16:51:52,162 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T16:51:52,163 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T16:51:52,164 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T16:51:52,164 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:52,165 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:51:52,165 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T16:51:52,166 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T16:51:52,166 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:52,167 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:51:52,167 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T16:51:52,168 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T16:51:52,168 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:52,168 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:51:52,168 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T16:51:52,169 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T16:51:52,169 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:52,170 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
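Note: every CompactionConfiguration line above prints the same policy parameters: minCompactSize 128 MB, between 3 and 10 files per compaction, ratio 1.2 and off-peak ratio 5.0. A minimal sketch of the configuration keys usually behind those numbers; the key names are assumptions that match the logged defaults, not values read from this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: the knobs CompactionConfiguration above reports (file counts, ratios).
    // Key names are assumed; the values simply restate the defaults printed in the log.
    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
        System.out.println("compaction ratio="
            + conf.getFloat("hbase.hstore.compaction.ratio", 0f));
      }
    }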
2024-11-24T16:51:52,170 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T16:51:52,171 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/data/hbase/meta/1588230740 2024-11-24T16:51:52,172 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/data/hbase/meta/1588230740 2024-11-24T16:51:52,174 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T16:51:52,174 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T16:51:52,174 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T16:51:52,176 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T16:51:52,177 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=764615, jitterRate=-0.027742907404899597}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T16:51:52,177 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T16:51:52,178 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732467112162Writing region info on filesystem at 1732467112162Initializing all the Stores at 1732467112163 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467112163Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467112163Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467112163Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467112163Cleaning up temporary data from old regions at 1732467112174 (+11 ms)Running coprocessor post-open hooks at 1732467112177 (+3 ms)Region opened successfully at 1732467112178 (+1 ms) 2024-11-24T16:51:52,179 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732467112141 2024-11-24T16:51:52,182 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T16:51:52,182 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T16:51:52,183 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e335232f7491,33385,1732467111110 2024-11-24T16:51:52,184 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e335232f7491,33385,1732467111110, state=OPEN 2024-11-24T16:51:52,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T16:51:52,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T16:51:52,192 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:51:52,192 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e335232f7491,33385,1732467111110 2024-11-24T16:51:52,192 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:51:52,195 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T16:51:52,195 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e335232f7491,33385,1732467111110 in 204 msec 2024-11-24T16:51:52,199 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T16:51:52,199 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 368 msec 2024-11-24T16:51:52,200 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:51:52,200 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T16:51:52,202 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T16:51:52,202 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e335232f7491,33385,1732467111110, seqNum=-1] 2024-11-24T16:51:52,202 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T16:51:52,204 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37273, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T16:51:52,211 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 838 msec 2024-11-24T16:51:52,211 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732467112211, completionTime=-1 2024-11-24T16:51:52,211 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T16:51:52,211 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T16:51:52,213 INFO [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T16:51:52,213 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732467172213 2024-11-24T16:51:52,213 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732467232213 2024-11-24T16:51:52,213 INFO [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-24T16:51:52,214 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,45439,1732467111047-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:52,214 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,45439,1732467111047-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:52,214 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,45439,1732467111047-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:52,214 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e335232f7491:45439, period=300000, unit=MILLISECONDS is enabled. 
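Note: InitMetaProcedure above creates the default and hbase namespaces before the master finishes initializing. A minimal sketch that lists them through the Admin API once the cluster is up; nothing here is specific to this test run, and the connection settings are assumed to come from the classpath:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch: listing the namespaces that InitMetaProcedure bootstraps
    // ("default" and "hbase").
    public class ListNamespacesSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName()); // expect at least: default, hbase
          }
        }
      }
    }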
2024-11-24T16:51:52,214 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:52,214 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:52,216 DEBUG [master/e335232f7491:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T16:51:52,219 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.060sec 2024-11-24T16:51:52,219 INFO [master/e335232f7491:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T16:51:52,219 INFO [master/e335232f7491:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T16:51:52,219 INFO [master/e335232f7491:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T16:51:52,219 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T16:51:52,219 INFO [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T16:51:52,219 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,45439,1732467111047-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T16:51:52,220 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,45439,1732467111047-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T16:51:52,222 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T16:51:52,222 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T16:51:52,223 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,45439,1732467111047-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T16:51:52,234 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ffb657f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:51:52,234 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e335232f7491,45439,-1 for getting cluster id 2024-11-24T16:51:52,234 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T16:51:52,236 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '515b96e3-ffa6-41b6-b92a-ce528bf03a68' 2024-11-24T16:51:52,236 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T16:51:52,236 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "515b96e3-ffa6-41b6-b92a-ce528bf03a68" 2024-11-24T16:51:52,237 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b8f8429, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:51:52,237 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e335232f7491,45439,-1] 2024-11-24T16:51:52,237 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T16:51:52,238 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:51:52,239 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35084, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T16:51:52,240 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e5d9cb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:51:52,241 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T16:51:52,242 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e335232f7491,33385,1732467111110, seqNum=-1] 2024-11-24T16:51:52,242 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T16:51:52,244 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44428, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T16:51:52,246 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e335232f7491,45439,1732467111047 2024-11-24T16:51:52,246 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:51:52,249 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T16:51:52,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T16:51:52,250 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T16:51:52,250 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:51:52,250 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:51:52,250 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:51:52,250 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T16:51:52,250 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T16:51:52,250 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1712408714, stopped=false 2024-11-24T16:51:52,250 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e335232f7491,45439,1732467111047 2024-11-24T16:51:52,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T16:51:52,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:52,252 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T16:51:52,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T16:51:52,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:52,252 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
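[Editor's note, hedged sketch] The DEBUG lines above record a client connection being built against the just-started mini cluster (cluster-id fetch through the connection registry, ClientMetaService stub creation, then the hbase:meta location lookup) and closed again. As a rough illustration of what that client-side flow looks like through the public HBase API — the Configuration source, table name, and row key below are hypothetical; in the test the Configuration comes from HBaseTestingUtil and already points at the mini cluster's ZooKeeper quorum:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.util.Bytes;

public class AsyncClientSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical: the real test obtains this from HBaseTestingUtil, so it
    // carries the mini cluster's quorum (127.0.0.1:51845 in the log above).
    Configuration conf = HBaseConfiguration.create();
    // Building the connection performs the registry round-trips logged above:
    // cluster-id fetch (ClusterIdFetcher) and connection-registry stub setup.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      // The first data access triggers the "fetching meta region location
      // from registry" DEBUG lines, then talks to the region server directly.
      conn.getTable(TableName.valueOf("testTable"))
          .get(new Get(Bytes.toBytes("row1")))
          .get();
    } // closing the connection emits the "Connection has been closed ..." call-stack DEBUG entries
  }
}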
2024-11-24T16:51:52,253 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:51:52,253 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:51:52,253 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e335232f7491,33385,1732467111110' ***** 2024-11-24T16:51:52,253 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T16:51:52,253 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:51:52,253 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:51:52,254 INFO [RS:0;e335232f7491:33385 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T16:51:52,254 INFO [RS:0;e335232f7491:33385 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T16:51:52,254 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T16:51:52,254 INFO [RS:0;e335232f7491:33385 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T16:51:52,254 INFO [RS:0;e335232f7491:33385 {}] regionserver.HRegionServer(959): stopping server e335232f7491,33385,1732467111110 2024-11-24T16:51:52,254 INFO [RS:0;e335232f7491:33385 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T16:51:52,254 INFO [RS:0;e335232f7491:33385 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e335232f7491:33385. 2024-11-24T16:51:52,254 DEBUG [RS:0;e335232f7491:33385 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:51:52,254 DEBUG [RS:0;e335232f7491:33385 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:51:52,254 INFO [RS:0;e335232f7491:33385 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
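[Editor's note, hedged sketch] The call stacks above show the teardown path this test takes: AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster -> JVMClusterUtil -> HMaster.shutdown, which is what starts the STOPPING sequence for the region server and master that follows. A loose sketch of that lifecycle using the classes named in the stack traces; the builder calls are assumed to mirror the fields printed in the StartMiniClusterOption{...} line the utility logs when a cluster is started, and the try body is a placeholder:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Assumed to correspond to the logged option string:
    // StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...}
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // logs "Minicluster is up; activeMaster=..."
    try {
      // ... test body, e.g. force WAL rolls / kill a datanode as in
      // TestLogRolling.testLogRollOnDatanodeDeath ...
    } finally {
      // Produces the shutdown sequence recorded above: region server and master
      // STOPPING, WALs moved to oldWALs, datanode block pools ended, MiniZK shutdown.
      util.shutdownMiniCluster();    // logs "Minicluster is down"
    }
  }
}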
2024-11-24T16:51:52,254 INFO [RS:0;e335232f7491:33385 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T16:51:52,254 INFO [RS:0;e335232f7491:33385 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T16:51:52,254 INFO [RS:0;e335232f7491:33385 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T16:51:52,255 INFO [RS:0;e335232f7491:33385 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-24T16:51:52,255 DEBUG [RS:0;e335232f7491:33385 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-24T16:51:52,255 DEBUG [RS:0;e335232f7491:33385 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T16:51:52,255 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T16:51:52,255 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T16:51:52,255 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T16:51:52,255 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T16:51:52,255 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T16:51:52,255 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-24T16:51:52,274 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/data/hbase/meta/1588230740/.tmp/ns/c04d64730eb84cb6a57f5c9aaa0c84ff is 43, key is default/ns:d/1732467112205/Put/seqid=0 2024-11-24T16:51:52,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36611 is added to blk_1073741835_1011 (size=5153) 2024-11-24T16:51:52,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46151 is added to blk_1073741835_1011 (size=5153) 2024-11-24T16:51:52,281 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/data/hbase/meta/1588230740/.tmp/ns/c04d64730eb84cb6a57f5c9aaa0c84ff 2024-11-24T16:51:52,288 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/data/hbase/meta/1588230740/.tmp/ns/c04d64730eb84cb6a57f5c9aaa0c84ff as hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/data/hbase/meta/1588230740/ns/c04d64730eb84cb6a57f5c9aaa0c84ff 2024-11-24T16:51:52,294 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/data/hbase/meta/1588230740/ns/c04d64730eb84cb6a57f5c9aaa0c84ff, entries=2, sequenceid=6, filesize=5.0 K 2024-11-24T16:51:52,296 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 41ms, sequenceid=6, compaction requested=false 2024-11-24T16:51:52,296 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T16:51:52,301 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-24T16:51:52,302 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T16:51:52,302 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T16:51:52,302 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732467112255Running coprocessor pre-close hooks at 1732467112255Disabling compacts and flushes for region at 1732467112255Disabling writes for close at 1732467112255Obtaining lock to block concurrent updates at 1732467112255Preparing flush snapshotting stores in 1588230740 at 1732467112255Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732467112256 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732467112257 (+1 ms)Flushing 1588230740/ns: creating writer at 1732467112257Flushing 1588230740/ns: appending metadata at 1732467112273 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732467112273Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3544f4f: reopening flushed file at 1732467112287 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 41ms, sequenceid=6, compaction requested=false at 1732467112296 (+9 ms)Writing region close event to WAL at 1732467112297 (+1 ms)Running coprocessor post-close hooks at 1732467112302 (+5 ms)Closed at 1732467112302 2024-11-24T16:51:52,302 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T16:51:52,455 INFO [RS:0;e335232f7491:33385 {}] regionserver.HRegionServer(976): stopping server e335232f7491,33385,1732467111110; all regions closed. 
2024-11-24T16:51:52,456 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:52,456 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:52,456 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:52,456 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:52,456 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:52,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46151 is added to blk_1073741834_1010 (size=1152) 2024-11-24T16:51:52,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36611 is added to blk_1073741834_1010 (size=1152) 2024-11-24T16:51:52,462 DEBUG [RS:0;e335232f7491:33385 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/oldWALs 2024-11-24T16:51:52,462 INFO [RS:0;e335232f7491:33385 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e335232f7491%2C33385%2C1732467111110.meta:.meta(num 1732467112152) 2024-11-24T16:51:52,462 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:52,463 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:52,463 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:52,463 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:52,463 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:52,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46151 is added to blk_1073741832_1008 (size=93) 2024-11-24T16:51:52,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36611 is added to blk_1073741832_1008 (size=93) 2024-11-24T16:51:52,467 DEBUG [RS:0;e335232f7491:33385 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/oldWALs 2024-11-24T16:51:52,467 INFO [RS:0;e335232f7491:33385 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e335232f7491%2C33385%2C1732467111110:(num 1732467111675) 2024-11-24T16:51:52,467 DEBUG [RS:0;e335232f7491:33385 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:51:52,467 INFO [RS:0;e335232f7491:33385 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T16:51:52,467 INFO [RS:0;e335232f7491:33385 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T16:51:52,467 INFO [RS:0;e335232f7491:33385 {}] hbase.ChoreService(370): Chore service for: regionserver/e335232f7491:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T16:51:52,468 INFO [RS:0;e335232f7491:33385 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T16:51:52,468 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T16:51:52,468 INFO [RS:0;e335232f7491:33385 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33385 2024-11-24T16:51:52,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e335232f7491,33385,1732467111110 2024-11-24T16:51:52,470 INFO [RS:0;e335232f7491:33385 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T16:51:52,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T16:51:52,471 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e335232f7491,33385,1732467111110] 2024-11-24T16:51:52,474 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e335232f7491,33385,1732467111110 already deleted, retry=false 2024-11-24T16:51:52,474 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e335232f7491,33385,1732467111110 expired; onlineServers=0 2024-11-24T16:51:52,474 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e335232f7491,45439,1732467111047' ***** 2024-11-24T16:51:52,474 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T16:51:52,474 INFO [M:0;e335232f7491:45439 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T16:51:52,474 INFO [M:0;e335232f7491:45439 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T16:51:52,474 DEBUG [M:0;e335232f7491:45439 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T16:51:52,475 DEBUG [M:0;e335232f7491:45439 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T16:51:52,475 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T16:51:52,475 DEBUG [master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467111376 {}] cleaner.HFileCleaner(306): Exit Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467111376,5,FailOnTimeoutGroup] 2024-11-24T16:51:52,475 DEBUG [master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467111376 {}] cleaner.HFileCleaner(306): Exit Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467111376,5,FailOnTimeoutGroup] 2024-11-24T16:51:52,475 INFO [M:0;e335232f7491:45439 {}] hbase.ChoreService(370): Chore service for: master/e335232f7491:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T16:51:52,475 INFO [M:0;e335232f7491:45439 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T16:51:52,475 DEBUG [M:0;e335232f7491:45439 {}] master.HMaster(1795): Stopping service threads 2024-11-24T16:51:52,475 INFO [M:0;e335232f7491:45439 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T16:51:52,475 INFO [M:0;e335232f7491:45439 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T16:51:52,475 INFO [M:0;e335232f7491:45439 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T16:51:52,475 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T16:51:52,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T16:51:52,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:52,477 DEBUG [M:0;e335232f7491:45439 {}] zookeeper.ZKUtil(347): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T16:51:52,477 WARN [M:0;e335232f7491:45439 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T16:51:52,477 INFO [M:0;e335232f7491:45439 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/.lastflushedseqids 2024-11-24T16:51:52,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46151 is added to blk_1073741836_1012 (size=108) 2024-11-24T16:51:52,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36611 is added to blk_1073741836_1012 (size=108) 2024-11-24T16:51:52,484 INFO [M:0;e335232f7491:45439 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T16:51:52,484 INFO [M:0;e335232f7491:45439 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T16:51:52,484 DEBUG [M:0;e335232f7491:45439 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T16:51:52,484 INFO [M:0;e335232f7491:45439 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:51:52,484 DEBUG [M:0;e335232f7491:45439 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:51:52,484 DEBUG [M:0;e335232f7491:45439 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T16:51:52,484 DEBUG [M:0;e335232f7491:45439 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:51:52,485 INFO [M:0;e335232f7491:45439 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-24T16:51:52,502 DEBUG [M:0;e335232f7491:45439 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4dafb75d0a714cb2a20560a60c638767 is 82, key is hbase:meta,,1/info:regioninfo/1732467112183/Put/seqid=0 2024-11-24T16:51:52,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46151 is added to blk_1073741837_1013 (size=5672) 2024-11-24T16:51:52,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36611 is added to blk_1073741837_1013 (size=5672) 2024-11-24T16:51:52,508 INFO [M:0;e335232f7491:45439 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4dafb75d0a714cb2a20560a60c638767 2024-11-24T16:51:52,531 DEBUG [M:0;e335232f7491:45439 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d683f11d11c640598182cfaf5b92c0bf is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732467112210/Put/seqid=0 2024-11-24T16:51:52,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46151 is added to blk_1073741838_1014 (size=5275) 2024-11-24T16:51:52,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36611 is added to blk_1073741838_1014 (size=5275) 2024-11-24T16:51:52,537 INFO [M:0;e335232f7491:45439 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d683f11d11c640598182cfaf5b92c0bf 2024-11-24T16:51:52,558 DEBUG [M:0;e335232f7491:45439 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f7a85ed0b406451faf4b469fbe6fbc28 is 69, key is e335232f7491,33385,1732467111110/rs:state/1732467111487/Put/seqid=0 2024-11-24T16:51:52,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46151 is added to blk_1073741839_1015 (size=5156) 2024-11-24T16:51:52,564 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36611 is added to blk_1073741839_1015 (size=5156) 2024-11-24T16:51:52,564 INFO [M:0;e335232f7491:45439 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f7a85ed0b406451faf4b469fbe6fbc28 2024-11-24T16:51:52,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:51:52,571 INFO [RS:0;e335232f7491:33385 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T16:51:52,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33385-0x1014ae41efd0001, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:51:52,571 INFO [RS:0;e335232f7491:33385 {}] regionserver.HRegionServer(1031): Exiting; stopping=e335232f7491,33385,1732467111110; zookeeper connection closed. 2024-11-24T16:51:52,572 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1401998e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1401998e 2024-11-24T16:51:52,572 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T16:51:52,585 DEBUG [M:0;e335232f7491:45439 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4264a2d3fe89438a923d05011656d990 is 52, key is load_balancer_on/state:d/1732467112248/Put/seqid=0 2024-11-24T16:51:52,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36611 is added to blk_1073741840_1016 (size=5056) 2024-11-24T16:51:52,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46151 is added to blk_1073741840_1016 (size=5056) 2024-11-24T16:51:52,590 INFO [M:0;e335232f7491:45439 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4264a2d3fe89438a923d05011656d990 2024-11-24T16:51:52,597 DEBUG [M:0;e335232f7491:45439 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4dafb75d0a714cb2a20560a60c638767 as hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4dafb75d0a714cb2a20560a60c638767 2024-11-24T16:51:52,602 INFO [M:0;e335232f7491:45439 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4dafb75d0a714cb2a20560a60c638767, entries=8, sequenceid=29, filesize=5.5 K 2024-11-24T16:51:52,603 DEBUG [M:0;e335232f7491:45439 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d683f11d11c640598182cfaf5b92c0bf as hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d683f11d11c640598182cfaf5b92c0bf 2024-11-24T16:51:52,609 INFO [M:0;e335232f7491:45439 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d683f11d11c640598182cfaf5b92c0bf, entries=3, sequenceid=29, filesize=5.2 K 2024-11-24T16:51:52,610 DEBUG [M:0;e335232f7491:45439 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f7a85ed0b406451faf4b469fbe6fbc28 as hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f7a85ed0b406451faf4b469fbe6fbc28 2024-11-24T16:51:52,616 INFO [M:0;e335232f7491:45439 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f7a85ed0b406451faf4b469fbe6fbc28, entries=1, sequenceid=29, filesize=5.0 K 2024-11-24T16:51:52,617 DEBUG [M:0;e335232f7491:45439 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4264a2d3fe89438a923d05011656d990 as hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4264a2d3fe89438a923d05011656d990 2024-11-24T16:51:52,623 INFO [M:0;e335232f7491:45439 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42105/user/jenkins/test-data/9afee1b2-591e-81a6-bb92-9d96461ea90d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4264a2d3fe89438a923d05011656d990, entries=1, sequenceid=29, filesize=4.9 K 2024-11-24T16:51:52,624 INFO [M:0;e335232f7491:45439 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 140ms, sequenceid=29, compaction requested=false 2024-11-24T16:51:52,626 INFO [M:0;e335232f7491:45439 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:51:52,626 DEBUG [M:0;e335232f7491:45439 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732467112484Disabling compacts and flushes for region at 1732467112484Disabling writes for close at 1732467112484Obtaining lock to block concurrent updates at 1732467112485 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732467112485Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732467112485Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732467112486 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732467112486Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732467112501 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732467112502 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732467112514 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732467112530 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732467112531 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732467112543 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732467112558 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732467112558Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732467112569 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732467112584 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732467112585 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15625a4d: reopening flushed file at 1732467112596 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a02eb86: reopening flushed file at 1732467112602 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d96c7fc: reopening flushed file at 1732467112609 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e69633b: reopening flushed file at 1732467112616 (+7 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 140ms, sequenceid=29, compaction requested=false at 1732467112624 (+8 ms)Writing region close event to WAL at 1732467112626 (+2 ms)Closed at 1732467112626 2024-11-24T16:51:52,626 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:52,626 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:52,626 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:52,627 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:52,627 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:51:52,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36611 is added to blk_1073741830_1006 (size=10311) 2024-11-24T16:51:52,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46151 is added to blk_1073741830_1006 (size=10311) 2024-11-24T16:51:52,630 INFO [M:0;e335232f7491:45439 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T16:51:52,630 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T16:51:52,630 INFO [M:0;e335232f7491:45439 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45439 2024-11-24T16:51:52,631 INFO [M:0;e335232f7491:45439 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T16:51:52,732 INFO [M:0;e335232f7491:45439 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T16:51:52,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:51:52,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45439-0x1014ae41efd0000, quorum=127.0.0.1:51845, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:51:52,735 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@254b7745{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:51:52,736 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@86e294e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:51:52,736 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:51:52,736 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64e5ce98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:51:52,736 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18f27499{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/hadoop.log.dir/,STOPPED} 2024-11-24T16:51:52,738 WARN [BP-1485803164-172.17.0.2-1732467110205 heartbeating to localhost/127.0.0.1:42105 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:51:52,738 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T16:51:52,738 WARN [BP-1485803164-172.17.0.2-1732467110205 heartbeating to localhost/127.0.0.1:42105 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1485803164-172.17.0.2-1732467110205 (Datanode Uuid a08036a4-6b45-40d2-b954-bc08ac0d7dba) service to localhost/127.0.0.1:42105 2024-11-24T16:51:52,738 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:51:52,738 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/cluster_5fbb8f11-7b1d-27c0-26f0-353903b8d76c/data/data3/current/BP-1485803164-172.17.0.2-1732467110205 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:51:52,739 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/cluster_5fbb8f11-7b1d-27c0-26f0-353903b8d76c/data/data4/current/BP-1485803164-172.17.0.2-1732467110205 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:51:52,739 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:51:52,745 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6a249094{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:51:52,745 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35445cbd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:51:52,745 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:51:52,745 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7247ee1d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:51:52,745 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@670e4080{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/hadoop.log.dir/,STOPPED} 2024-11-24T16:51:52,746 WARN [BP-1485803164-172.17.0.2-1732467110205 heartbeating to localhost/127.0.0.1:42105 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:51:52,746 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T16:51:52,747 WARN [BP-1485803164-172.17.0.2-1732467110205 heartbeating to localhost/127.0.0.1:42105 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1485803164-172.17.0.2-1732467110205 (Datanode Uuid d05fe914-e1e2-4389-8e6e-c195b1c7c8c7) service to localhost/127.0.0.1:42105 2024-11-24T16:51:52,747 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:51:52,747 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/cluster_5fbb8f11-7b1d-27c0-26f0-353903b8d76c/data/data1/current/BP-1485803164-172.17.0.2-1732467110205 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:51:52,748 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/cluster_5fbb8f11-7b1d-27c0-26f0-353903b8d76c/data/data2/current/BP-1485803164-172.17.0.2-1732467110205 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:51:52,748 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:51:52,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1dc8c743{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T16:51:52,755 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c4717a0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:51:52,755 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:51:52,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ae773f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:51:52,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a69944b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/hadoop.log.dir/,STOPPED} 2024-11-24T16:51:52,762 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T16:51:52,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T16:51:52,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T16:51:52,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/hadoop.log.dir so I do NOT create it in target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca 2024-11-24T16:51:52,783 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d66509d0-cec9-db59-2d3d-a8fe6f2741ee/hadoop.tmp.dir so I do NOT create it in target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca 2024-11-24T16:51:52,784 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2, deleteOnExit=true 2024-11-24T16:51:52,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T16:51:52,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/test.cache.data in system properties and HBase conf 2024-11-24T16:51:52,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T16:51:52,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/hadoop.log.dir in system properties and HBase conf 2024-11-24T16:51:52,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T16:51:52,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T16:51:52,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T16:51:52,784 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T16:51:52,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T16:51:52,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T16:51:52,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T16:51:52,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T16:51:52,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T16:51:52,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T16:51:52,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T16:51:52,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T16:51:52,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T16:51:52,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/nfs.dump.dir in system properties and HBase conf 2024-11-24T16:51:52,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/java.io.tmpdir in system properties and HBase conf 2024-11-24T16:51:52,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T16:51:52,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T16:51:52,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T16:51:52,801 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T16:51:52,874 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:51:52,881 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:51:52,884 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:51:52,884 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:51:52,884 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T16:51:52,885 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:51:52,886 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a488aac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:51:52,886 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ed3a961{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:51:53,002 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7cd2a640{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/java.io.tmpdir/jetty-localhost-43075-hadoop-hdfs-3_4_1-tests_jar-_-any-5175457202412833710/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T16:51:53,003 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64b7b556{HTTP/1.1, (http/1.1)}{localhost:43075} 2024-11-24T16:51:53,003 INFO [Time-limited test {}] server.Server(415): Started @103965ms 2024-11-24T16:51:53,017 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T16:51:53,090 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:51:53,094 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:51:53,096 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:51:53,096 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:51:53,096 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T16:51:53,099 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@257cf4bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:51:53,100 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30e7c448{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:51:53,214 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@ab5393f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/java.io.tmpdir/jetty-localhost-40321-hadoop-hdfs-3_4_1-tests_jar-_-any-9402253563247431636/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:51:53,215 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47f82e76{HTTP/1.1, (http/1.1)}{localhost:40321} 2024-11-24T16:51:53,215 INFO [Time-limited test {}] server.Server(415): Started @104177ms 2024-11-24T16:51:53,216 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T16:51:53,247 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:51:53,251 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:51:53,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:51:53,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:51:53,252 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T16:51:53,252 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69bbaec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:51:53,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7720beab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:51:53,319 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data2/current/BP-481434126-172.17.0.2-1732467112819/current, will proceed with Du for space computation calculation, 2024-11-24T16:51:53,319 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data1/current/BP-481434126-172.17.0.2-1732467112819/current, will proceed with Du for space computation calculation, 2024-11-24T16:51:53,343 WARN [Thread-635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T16:51:53,347 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x463f2791306d347 with lease ID 0x212d005473bac036: Processing first storage report for DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8 from datanode DatanodeRegistration(127.0.0.1:38531, datanodeUuid=9108b226-f41c-473c-8782-8726a21395d3, infoPort=35007, infoSecurePort=0, ipcPort=44515, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819) 2024-11-24T16:51:53,347 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x463f2791306d347 with lease ID 0x212d005473bac036: from storage DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8 node DatanodeRegistration(127.0.0.1:38531, datanodeUuid=9108b226-f41c-473c-8782-8726a21395d3, infoPort=35007, infoSecurePort=0, ipcPort=44515, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T16:51:53,347 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x463f2791306d347 with lease ID 0x212d005473bac036: Processing first storage report for DS-86f3c3d9-20e8-4f1c-8667-52378e9c38da from datanode DatanodeRegistration(127.0.0.1:38531, datanodeUuid=9108b226-f41c-473c-8782-8726a21395d3, infoPort=35007, infoSecurePort=0, ipcPort=44515, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819) 2024-11-24T16:51:53,347 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x463f2791306d347 with lease ID 0x212d005473bac036: from storage DS-86f3c3d9-20e8-4f1c-8667-52378e9c38da node DatanodeRegistration(127.0.0.1:38531, datanodeUuid=9108b226-f41c-473c-8782-8726a21395d3, infoPort=35007, infoSecurePort=0, ipcPort=44515, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:51:53,374 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@21d5e4af{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/java.io.tmpdir/jetty-localhost-45441-hadoop-hdfs-3_4_1-tests_jar-_-any-1183932308984882675/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:51:53,374 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@145c0180{HTTP/1.1, (http/1.1)}{localhost:45441} 2024-11-24T16:51:53,374 INFO [Time-limited test {}] server.Server(415): Started @104336ms 2024-11-24T16:51:53,376 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
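[Editorial sketch, not part of the captured log.] The entries above record the harness tearing down one minicluster and bringing up a fresh one: a namenode and two datanodes are started (each with its own Jetty web UI), using the options logged earlier (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1). Assuming the HBase test API suggested by the class names in this log (HBaseTestingUtil and StartMiniClusterOption; exact method signatures may differ between HBase versions), a test would drive this sequence roughly as follows:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // Class and option names are taken from the log output above;
        // treat the exact API as an assumption about this HBase version.
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)        // one HMaster, as in the logged options
            .numRegionServers(1)  // one region server
            .numDataNodes(2)      // two HDFS datanodes (data1..data4 dirs above)
            .numZkServers(1)      // single MiniZooKeeperCluster node
            .build();
        util.startMiniCluster(option);   // would produce the "STARTING DFS", Jetty and ZK entries
        try {
          // ... test body ...
        } finally {
          util.shutdownMiniCluster();    // would produce a "Minicluster is down" entry
        }
      }
    }

A call like this is what the surrounding entries correspond to; the harness, not the test body, emits every line in this section.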
2024-11-24T16:51:53,496 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data3/current/BP-481434126-172.17.0.2-1732467112819/current, will proceed with Du for space computation calculation, 2024-11-24T16:51:53,496 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data4/current/BP-481434126-172.17.0.2-1732467112819/current, will proceed with Du for space computation calculation, 2024-11-24T16:51:53,514 WARN [Thread-671 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T16:51:53,517 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9bf01142b93593c with lease ID 0x212d005473bac037: Processing first storage report for DS-911a699d-718c-45e8-8b9b-d4334c5c0d84 from datanode DatanodeRegistration(127.0.0.1:36379, datanodeUuid=61c9693b-f575-4f3d-a37d-37fe8ba7ee69, infoPort=39177, infoSecurePort=0, ipcPort=39757, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819) 2024-11-24T16:51:53,517 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9bf01142b93593c with lease ID 0x212d005473bac037: from storage DS-911a699d-718c-45e8-8b9b-d4334c5c0d84 node DatanodeRegistration(127.0.0.1:36379, datanodeUuid=61c9693b-f575-4f3d-a37d-37fe8ba7ee69, infoPort=39177, infoSecurePort=0, ipcPort=39757, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:51:53,517 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9bf01142b93593c with lease ID 0x212d005473bac037: Processing first storage report for DS-12fc6047-b1bc-4de9-9aaf-042ece08439d from datanode DatanodeRegistration(127.0.0.1:36379, datanodeUuid=61c9693b-f575-4f3d-a37d-37fe8ba7ee69, infoPort=39177, infoSecurePort=0, ipcPort=39757, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819) 2024-11-24T16:51:53,517 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9bf01142b93593c with lease ID 0x212d005473bac037: from storage DS-12fc6047-b1bc-4de9-9aaf-042ece08439d node DatanodeRegistration(127.0.0.1:36379, datanodeUuid=61c9693b-f575-4f3d-a37d-37fe8ba7ee69, infoPort=39177, infoSecurePort=0, ipcPort=39757, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:51:53,526 INFO [regionserver/e335232f7491:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T16:51:53,604 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca 2024-11-24T16:51:53,607 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, 
dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/zookeeper_0, clientPort=63785, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T16:51:53,608 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63785 2024-11-24T16:51:53,608 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:51:53,610 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:51:53,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741825_1001 (size=7) 2024-11-24T16:51:53,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38531 is added to blk_1073741825_1001 (size=7) 2024-11-24T16:51:53,622 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8 with version=8 2024-11-24T16:51:53,622 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/hbase-staging 2024-11-24T16:51:53,624 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e335232f7491:0 server-side Connection retries=45 2024-11-24T16:51:53,624 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:51:53,624 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T16:51:53,624 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T16:51:53,624 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:51:53,624 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T16:51:53,625 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, 
hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T16:51:53,625 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T16:51:53,625 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43593 2024-11-24T16:51:53,627 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43593 connecting to ZooKeeper ensemble=127.0.0.1:63785 2024-11-24T16:51:53,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:435930x0, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T16:51:53,634 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43593-0x1014ae429160000 connected 2024-11-24T16:51:53,649 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:51:53,651 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:51:53,654 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:51:53,654 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8, hbase.cluster.distributed=false 2024-11-24T16:51:53,656 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T16:51:53,656 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43593 2024-11-24T16:51:53,657 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43593 2024-11-24T16:51:53,657 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43593 2024-11-24T16:51:53,657 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43593 2024-11-24T16:51:53,657 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43593 2024-11-24T16:51:53,674 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e335232f7491:0 server-side Connection retries=45 2024-11-24T16:51:53,674 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:51:53,674 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T16:51:53,674 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 
2024-11-24T16:51:53,674 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:51:53,674 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T16:51:53,674 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T16:51:53,674 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T16:51:53,675 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41569 2024-11-24T16:51:53,676 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41569 connecting to ZooKeeper ensemble=127.0.0.1:63785 2024-11-24T16:51:53,677 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:51:53,679 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:51:53,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:415690x0, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T16:51:53,685 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41569-0x1014ae429160001 connected 2024-11-24T16:51:53,686 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:51:53,686 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T16:51:53,686 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T16:51:53,687 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T16:51:53,688 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T16:51:53,691 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41569 2024-11-24T16:51:53,691 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41569 2024-11-24T16:51:53,693 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41569 2024-11-24T16:51:53,694 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41569 
2024-11-24T16:51:53,694 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41569 2024-11-24T16:51:53,707 DEBUG [M:0;e335232f7491:43593 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e335232f7491:43593 2024-11-24T16:51:53,707 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e335232f7491,43593,1732467113624 2024-11-24T16:51:53,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:51:53,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:51:53,709 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e335232f7491,43593,1732467113624 2024-11-24T16:51:53,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T16:51:53,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:53,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:53,712 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T16:51:53,712 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e335232f7491,43593,1732467113624 from backup master directory 2024-11-24T16:51:53,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e335232f7491,43593,1732467113624 2024-11-24T16:51:53,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:51:53,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:51:53,714 WARN [master/e335232f7491:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T16:51:53,714 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e335232f7491,43593,1732467113624 2024-11-24T16:51:53,718 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/hbase.id] with ID: 7a9bc5c4-4cf9-4227-91b5-31e8262d47de 2024-11-24T16:51:53,719 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/.tmp/hbase.id 2024-11-24T16:51:53,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38531 is added to blk_1073741826_1002 (size=42) 2024-11-24T16:51:53,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741826_1002 (size=42) 2024-11-24T16:51:53,728 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/.tmp/hbase.id]:[hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/hbase.id] 2024-11-24T16:51:53,743 INFO [master/e335232f7491:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:51:53,743 INFO [master/e335232f7491:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T16:51:53,745 INFO [master/e335232f7491:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
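[Editorial sketch, not part of the captured log.] By this point the master has registered itself as active and written the cluster ID file. The MiniZooKeeperCluster entries above record the client port (63785) that a client would use to reach this cluster. A minimal, hypothetical client using the standard HBase client API is shown below; the quorum host and port are taken from the log, everything else is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ConnectToMiniCluster {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Values taken from the log above: MiniZooKeeperCluster on 127.0.0.1:63785.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "63785");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          System.out.println("Cluster id: " + admin.getClusterMetrics().getClusterId());
        }
      }
    }

Connection and Admin are both Closeable, hence the try-with-resources; in the actual test the harness hands out an equivalent connection rather than one built by hand.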
2024-11-24T16:51:53,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:53,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:53,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741827_1003 (size=196) 2024-11-24T16:51:53,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38531 is added to blk_1073741827_1003 (size=196) 2024-11-24T16:51:53,761 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T16:51:53,763 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T16:51:53,763 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:51:53,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741828_1004 (size=1189) 2024-11-24T16:51:53,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38531 is added to blk_1073741828_1004 (size=1189) 2024-11-24T16:51:53,773 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store 2024-11-24T16:51:53,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741829_1005 (size=34) 2024-11-24T16:51:53,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38531 is added to blk_1073741829_1005 (size=34) 2024-11-24T16:51:53,780 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:51:53,780 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T16:51:53,780 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:51:53,780 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:51:53,780 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T16:51:53,780 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:51:53,781 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
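[Editorial sketch, not part of the captured log.] The two entries above print the full descriptor of the local 'master:store' region, including its four column families and their attributes. For reference, the 'info' family shown there corresponds to a descriptor built roughly as follows with the public HBase client API (illustrative only; the master constructs this descriptor internally, and the other families, proc, rs and state, differ only in the attributes shown above):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family attributes printed in the log above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
            .setBlocksize(8 * 1024)                                // BLOCKSIZE => 8192 B (8KB)
            .build();
        TableDescriptor store = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))      // table 'master:store'
            .setColumnFamily(info)
            .build();
        System.out.println(store);
      }
    }

The toString of such a descriptor is essentially what the MasterRegion entry above prints before the region is created on the filesystem.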
2024-11-24T16:51:53,781 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732467113780Disabling compacts and flushes for region at 1732467113780Disabling writes for close at 1732467113780Writing region close event to WAL at 1732467113781 (+1 ms)Closed at 1732467113781 2024-11-24T16:51:53,782 WARN [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/.initializing 2024-11-24T16:51:53,782 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/WALs/e335232f7491,43593,1732467113624 2024-11-24T16:51:53,785 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C43593%2C1732467113624, suffix=, logDir=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/WALs/e335232f7491,43593,1732467113624, archiveDir=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/oldWALs, maxLogs=10 2024-11-24T16:51:53,785 INFO [master/e335232f7491:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C43593%2C1732467113624.1732467113785 2024-11-24T16:51:53,790 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/WALs/e335232f7491,43593,1732467113624/e335232f7491%2C43593%2C1732467113624.1732467113785 2024-11-24T16:51:53,791 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39177:39177),(127.0.0.1/127.0.0.1:35007:35007)] 2024-11-24T16:51:53,792 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:51:53,792 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:51:53,792 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:53,792 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:53,796 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:53,798 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T16:51:53,798 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:53,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:51:53,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:53,801 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T16:51:53,801 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:53,801 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:51:53,801 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:53,803 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T16:51:53,803 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:53,803 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:51:53,803 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:53,805 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T16:51:53,805 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:53,805 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:51:53,805 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:53,806 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:53,806 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:53,808 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:53,808 DEBUG [master/e335232f7491:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:53,809 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T16:51:53,810 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:51:53,813 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:51:53,813 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=772631, jitterRate=-0.017549768090248108}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T16:51:53,815 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732467113793Initializing all the Stores at 1732467113793Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467113793Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467113796 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467113796Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467113796Cleaning up temporary data from old regions at 1732467113808 (+12 ms)Region opened successfully at 1732467113815 (+7 ms) 2024-11-24T16:51:53,815 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T16:51:53,819 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59062378, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e335232f7491/172.17.0.2:0 2024-11-24T16:51:53,820 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T16:51:53,821 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T16:51:53,821 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T16:51:53,821 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T16:51:53,822 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T16:51:53,822 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T16:51:53,822 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T16:51:53,825 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T16:51:53,826 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T16:51:53,827 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T16:51:53,827 INFO [master/e335232f7491:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T16:51:53,828 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T16:51:53,829 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T16:51:53,830 INFO [master/e335232f7491:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T16:51:53,830 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T16:51:53,832 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T16:51:53,832 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T16:51:53,835 DEBUG 
[master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T16:51:53,837 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T16:51:53,839 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T16:51:53,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T16:51:53,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T16:51:53,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:53,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:53,841 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e335232f7491,43593,1732467113624, sessionid=0x1014ae429160000, setting cluster-up flag (Was=false) 2024-11-24T16:51:53,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:53,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:53,850 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T16:51:53,851 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e335232f7491,43593,1732467113624 2024-11-24T16:51:53,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:53,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:53,862 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T16:51:53,863 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e335232f7491,43593,1732467113624 2024-11-24T16:51:53,865 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T16:51:53,867 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T16:51:53,867 INFO [master/e335232f7491:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T16:51:53,867 INFO [master/e335232f7491:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T16:51:53,867 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e335232f7491,43593,1732467113624 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T16:51:53,869 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:51:53,869 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:51:53,869 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:51:53,869 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:51:53,869 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e335232f7491:0, corePoolSize=10, maxPoolSize=10 2024-11-24T16:51:53,869 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:53,869 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e335232f7491:0, corePoolSize=2, maxPoolSize=2 2024-11-24T16:51:53,869 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e335232f7491:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T16:51:53,871 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732467143870 2024-11-24T16:51:53,871 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T16:51:53,871 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T16:51:53,871 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T16:51:53,871 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T16:51:53,871 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T16:51:53,871 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T16:51:53,871 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:51:53,871 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:53,871 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T16:51:53,871 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T16:51:53,872 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T16:51:53,872 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T16:51:53,872 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T16:51:53,872 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T16:51:53,872 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:53,873 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T16:51:53,876 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467113872,5,FailOnTimeoutGroup] 2024-11-24T16:51:53,876 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467113876,5,FailOnTimeoutGroup] 2024-11-24T16:51:53,876 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:53,876 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T16:51:53,876 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:53,876 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-24T16:51:53,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38531 is added to blk_1073741831_1007 (size=1321) 2024-11-24T16:51:53,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741831_1007 (size=1321) 2024-11-24T16:51:53,886 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T16:51:53,886 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8 2024-11-24T16:51:53,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741832_1008 (size=32) 2024-11-24T16:51:53,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38531 is added to blk_1073741832_1008 (size=32) 2024-11-24T16:51:53,893 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:51:53,895 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T16:51:53,896 INFO [RS:0;e335232f7491:41569 {}] regionserver.HRegionServer(746): ClusterId : 7a9bc5c4-4cf9-4227-91b5-31e8262d47de 2024-11-24T16:51:53,896 DEBUG [RS:0;e335232f7491:41569 {}] procedure.RegionServerProcedureManagerHost(43): Procedure 
flush-table-proc initializing 2024-11-24T16:51:53,896 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T16:51:53,896 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:53,897 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:51:53,897 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T16:51:53,898 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T16:51:53,899 DEBUG [RS:0;e335232f7491:41569 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T16:51:53,899 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:53,899 DEBUG [RS:0;e335232f7491:41569 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T16:51:53,899 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:51:53,899 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T16:51:53,901 DEBUG [RS:0;e335232f7491:41569 {}] procedure.RegionServerProcedureManagerHost(45): Procedure 
online-snapshot initialized 2024-11-24T16:51:53,901 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T16:51:53,901 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:53,901 DEBUG [RS:0;e335232f7491:41569 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4eb4bc50, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e335232f7491/172.17.0.2:0 2024-11-24T16:51:53,901 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:51:53,902 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T16:51:53,903 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T16:51:53,903 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:53,904 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:51:53,904 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T16:51:53,904 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740 2024-11-24T16:51:53,905 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740 2024-11-24T16:51:53,906 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T16:51:53,906 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T16:51:53,907 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T16:51:53,908 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T16:51:53,911 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:51:53,911 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=802595, jitterRate=0.020552843809127808}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T16:51:53,912 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732467113894Initializing all the Stores at 1732467113894Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467113894Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467113895 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467113895Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467113895Cleaning up temporary data from old regions at 1732467113906 (+11 ms)Region opened successfully at 1732467113912 (+6 ms) 2024-11-24T16:51:53,912 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T16:51:53,912 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T16:51:53,913 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on 
hbase:meta,,1.1588230740 2024-11-24T16:51:53,913 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T16:51:53,913 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T16:51:53,915 DEBUG [RS:0;e335232f7491:41569 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e335232f7491:41569 2024-11-24T16:51:53,915 INFO [RS:0;e335232f7491:41569 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T16:51:53,915 INFO [RS:0;e335232f7491:41569 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T16:51:53,915 DEBUG [RS:0;e335232f7491:41569 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-24T16:51:53,915 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T16:51:53,915 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732467113912Disabling compacts and flushes for region at 1732467113912Disabling writes for close at 1732467113913 (+1 ms)Writing region close event to WAL at 1732467113915 (+2 ms)Closed at 1732467113915 2024-11-24T16:51:53,916 INFO [RS:0;e335232f7491:41569 {}] regionserver.HRegionServer(2659): reportForDuty to master=e335232f7491,43593,1732467113624 with port=41569, startcode=1732467113673 2024-11-24T16:51:53,916 DEBUG [RS:0;e335232f7491:41569 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T16:51:53,917 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:51:53,917 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T16:51:53,917 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T16:51:53,919 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48731, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T16:51:53,919 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T16:51:53,920 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43593 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e335232f7491,41569,1732467113673 2024-11-24T16:51:53,920 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43593 {}] master.ServerManager(517): Registering regionserver=e335232f7491,41569,1732467113673 2024-11-24T16:51:53,921 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T16:51:53,922 DEBUG [RS:0;e335232f7491:41569 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8 2024-11-24T16:51:53,922 DEBUG [RS:0;e335232f7491:41569 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37129 2024-11-24T16:51:53,922 DEBUG [RS:0;e335232f7491:41569 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T16:51:53,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T16:51:53,924 DEBUG [RS:0;e335232f7491:41569 {}] zookeeper.ZKUtil(111): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e335232f7491,41569,1732467113673 2024-11-24T16:51:53,924 WARN [RS:0;e335232f7491:41569 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T16:51:53,924 INFO [RS:0;e335232f7491:41569 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:51:53,925 DEBUG [RS:0;e335232f7491:41569 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673 2024-11-24T16:51:53,925 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e335232f7491,41569,1732467113673] 2024-11-24T16:51:53,928 INFO [RS:0;e335232f7491:41569 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T16:51:53,931 INFO [RS:0;e335232f7491:41569 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T16:51:53,931 INFO [RS:0;e335232f7491:41569 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T16:51:53,931 INFO [RS:0;e335232f7491:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:53,932 INFO [RS:0;e335232f7491:41569 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T16:51:53,933 INFO [RS:0;e335232f7491:41569 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T16:51:53,933 INFO [RS:0;e335232f7491:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-24T16:51:53,933 DEBUG [RS:0;e335232f7491:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:53,933 DEBUG [RS:0;e335232f7491:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:53,933 DEBUG [RS:0;e335232f7491:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:53,933 DEBUG [RS:0;e335232f7491:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:53,933 DEBUG [RS:0;e335232f7491:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:53,933 DEBUG [RS:0;e335232f7491:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e335232f7491:0, corePoolSize=2, maxPoolSize=2 2024-11-24T16:51:53,933 DEBUG [RS:0;e335232f7491:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:53,933 DEBUG [RS:0;e335232f7491:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:53,933 DEBUG [RS:0;e335232f7491:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:53,933 DEBUG [RS:0;e335232f7491:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:53,933 DEBUG [RS:0;e335232f7491:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:53,933 DEBUG [RS:0;e335232f7491:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:53,933 DEBUG [RS:0;e335232f7491:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e335232f7491:0, corePoolSize=3, maxPoolSize=3 2024-11-24T16:51:53,933 DEBUG [RS:0;e335232f7491:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0, corePoolSize=3, maxPoolSize=3 2024-11-24T16:51:53,934 INFO [RS:0;e335232f7491:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:53,934 INFO [RS:0;e335232f7491:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:53,934 INFO [RS:0;e335232f7491:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:53,934 INFO [RS:0;e335232f7491:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-24T16:51:53,934 INFO [RS:0;e335232f7491:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:53,934 INFO [RS:0;e335232f7491:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,41569,1732467113673-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T16:51:53,951 INFO [RS:0;e335232f7491:41569 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T16:51:53,952 INFO [RS:0;e335232f7491:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,41569,1732467113673-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:53,952 INFO [RS:0;e335232f7491:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:53,952 INFO [RS:0;e335232f7491:41569 {}] regionserver.Replication(171): e335232f7491,41569,1732467113673 started 2024-11-24T16:51:53,967 INFO [RS:0;e335232f7491:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:53,967 INFO [RS:0;e335232f7491:41569 {}] regionserver.HRegionServer(1482): Serving as e335232f7491,41569,1732467113673, RpcServer on e335232f7491/172.17.0.2:41569, sessionid=0x1014ae429160001 2024-11-24T16:51:53,967 DEBUG [RS:0;e335232f7491:41569 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T16:51:53,967 DEBUG [RS:0;e335232f7491:41569 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e335232f7491,41569,1732467113673 2024-11-24T16:51:53,968 DEBUG [RS:0;e335232f7491:41569 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e335232f7491,41569,1732467113673' 2024-11-24T16:51:53,968 DEBUG [RS:0;e335232f7491:41569 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T16:51:53,968 DEBUG [RS:0;e335232f7491:41569 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T16:51:53,969 DEBUG [RS:0;e335232f7491:41569 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T16:51:53,969 DEBUG [RS:0;e335232f7491:41569 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T16:51:53,969 DEBUG [RS:0;e335232f7491:41569 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e335232f7491,41569,1732467113673 2024-11-24T16:51:53,969 DEBUG [RS:0;e335232f7491:41569 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e335232f7491,41569,1732467113673' 2024-11-24T16:51:53,969 DEBUG [RS:0;e335232f7491:41569 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T16:51:53,969 DEBUG [RS:0;e335232f7491:41569 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T16:51:53,970 DEBUG [RS:0;e335232f7491:41569 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T16:51:53,970 INFO [RS:0;e335232f7491:41569 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T16:51:53,970 INFO [RS:0;e335232f7491:41569 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-24T16:51:54,071 WARN [e335232f7491:43593 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T16:51:54,072 INFO [RS:0;e335232f7491:41569 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C41569%2C1732467113673, suffix=, logDir=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673, archiveDir=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/oldWALs, maxLogs=32 2024-11-24T16:51:54,074 INFO [RS:0;e335232f7491:41569 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C41569%2C1732467113673.1732467114073 2024-11-24T16:51:54,081 INFO [RS:0;e335232f7491:41569 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467114073 2024-11-24T16:51:54,086 DEBUG [RS:0;e335232f7491:41569 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35007:35007),(127.0.0.1/127.0.0.1:39177:39177)] 2024-11-24T16:51:54,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:51:54,193 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:51:54,321 DEBUG [e335232f7491:43593 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T16:51:54,322 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e335232f7491,41569,1732467113673 2024-11-24T16:51:54,324 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e335232f7491,41569,1732467113673, state=OPENING 2024-11-24T16:51:54,327 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T16:51:54,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:54,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:51:54,329 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T16:51:54,330 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:51:54,330 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:51:54,330 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, 
hasLock=false; OpenRegionProcedure 1588230740, server=e335232f7491,41569,1732467113673}] 2024-11-24T16:51:54,483 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T16:51:54,485 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39657, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T16:51:54,489 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T16:51:54,490 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:51:54,491 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C41569%2C1732467113673.meta, suffix=.meta, logDir=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673, archiveDir=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/oldWALs, maxLogs=32 2024-11-24T16:51:54,492 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta 2024-11-24T16:51:54,497 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta 2024-11-24T16:51:54,498 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35007:35007),(127.0.0.1/127.0.0.1:39177:39177)] 2024-11-24T16:51:54,499 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:51:54,499 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T16:51:54,499 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T16:51:54,499 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-24T16:51:54,499 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T16:51:54,499 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:51:54,500 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T16:51:54,500 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T16:51:54,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T16:51:54,502 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T16:51:54,502 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:54,503 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:51:54,503 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T16:51:54,503 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T16:51:54,503 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:54,504 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:51:54,504 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T16:51:54,505 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T16:51:54,505 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:54,505 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:51:54,505 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T16:51:54,506 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T16:51:54,506 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:54,506 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T16:51:54,506 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T16:51:54,507 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740 2024-11-24T16:51:54,508 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740 2024-11-24T16:51:54,509 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T16:51:54,509 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T16:51:54,510 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T16:51:54,511 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T16:51:54,512 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=838577, jitterRate=0.06630688905715942}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T16:51:54,512 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T16:51:54,513 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732467114500Writing region info on filesystem at 1732467114500Initializing all the Stores at 1732467114501 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467114501Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467114501Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467114501Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467114501Cleaning up temporary data from old regions at 1732467114510 (+9 ms)Running coprocessor post-open hooks at 1732467114512 (+2 ms)Region opened successfully at 1732467114513 (+1 ms) 2024-11-24T16:51:54,514 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732467114483 2024-11-24T16:51:54,517 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T16:51:54,517 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T16:51:54,518 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e335232f7491,41569,1732467113673 2024-11-24T16:51:54,520 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e335232f7491,41569,1732467113673, state=OPEN 2024-11-24T16:51:54,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T16:51:54,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T16:51:54,524 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:51:54,524 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e335232f7491,41569,1732467113673 2024-11-24T16:51:54,524 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:51:54,528 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T16:51:54,528 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e335232f7491,41569,1732467113673 in 194 msec 2024-11-24T16:51:54,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T16:51:54,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 611 msec 2024-11-24T16:51:54,532 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:51:54,532 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T16:51:54,533 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T16:51:54,533 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e335232f7491,41569,1732467113673, seqNum=-1] 2024-11-24T16:51:54,534 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T16:51:54,535 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43743, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T16:51:54,541 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 674 msec 2024-11-24T16:51:54,542 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732467114542, completionTime=-1 2024-11-24T16:51:54,542 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T16:51:54,542 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T16:51:54,544 INFO [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T16:51:54,544 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732467174544 2024-11-24T16:51:54,544 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732467234544 2024-11-24T16:51:54,544 INFO [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T16:51:54,544 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,43593,1732467113624-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:54,544 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,43593,1732467113624-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:54,544 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,43593,1732467113624-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:54,544 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e335232f7491:43593, period=300000, unit=MILLISECONDS is enabled. 
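(Side note on the namespace step above: InitMetaProcedure creates the built-in 'default' and 'hbase' namespaces automatically. A minimal client-side sketch of the same kind of namespace creation through the public Admin API is below; the namespace name "demo_ns" and the class name are illustrative only and do not appear in this test run.)

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateNamespaceSketch {
      public static void main(String[] args) throws Exception {
        // Namespaces group tables; hbase:meta itself lives in the reserved 'hbase' namespace.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
        }
      }
    }
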
2024-11-24T16:51:54,544 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:54,545 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:54,546 DEBUG [master/e335232f7491:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T16:51:54,548 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.834sec 2024-11-24T16:51:54,549 INFO [master/e335232f7491:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T16:51:54,549 INFO [master/e335232f7491:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T16:51:54,549 INFO [master/e335232f7491:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T16:51:54,549 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T16:51:54,549 INFO [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T16:51:54,549 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,43593,1732467113624-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T16:51:54,549 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,43593,1732467113624-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T16:51:54,551 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T16:51:54,551 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T16:51:54,551 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,43593,1732467113624-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T16:51:54,596 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd7b3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:51:54,597 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e335232f7491,43593,-1 for getting cluster id 2024-11-24T16:51:54,597 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T16:51:54,599 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7a9bc5c4-4cf9-4227-91b5-31e8262d47de' 2024-11-24T16:51:54,599 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T16:51:54,599 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7a9bc5c4-4cf9-4227-91b5-31e8262d47de" 2024-11-24T16:51:54,600 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@132de1c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:51:54,600 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e335232f7491,43593,-1] 2024-11-24T16:51:54,600 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T16:51:54,600 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:51:54,602 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45344, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T16:51:54,603 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@abbe752, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:51:54,603 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T16:51:54,604 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e335232f7491,41569,1732467113673, seqNum=-1] 2024-11-24T16:51:54,605 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T16:51:54,606 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48570, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T16:51:54,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e335232f7491,43593,1732467113624 2024-11-24T16:51:54,608 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:51:54,611 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T16:51:54,627 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e335232f7491:0 server-side Connection retries=45 2024-11-24T16:51:54,627 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:51:54,627 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T16:51:54,627 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T16:51:54,627 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:51:54,627 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T16:51:54,627 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T16:51:54,627 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T16:51:54,628 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42939 2024-11-24T16:51:54,630 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42939 connecting to ZooKeeper ensemble=127.0.0.1:63785 2024-11-24T16:51:54,630 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:51:54,632 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:51:54,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:429390x0, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T16:51:54,638 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42939-0x1014ae429160002 connected 2024-11-24T16:51:54,638 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42939-0x1014ae429160002, quorum=127.0.0.1:63785, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-24T16:51:54,638 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-24T16:51:54,639 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T16:51:54,639 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
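(The entries above show the minicluster reporting up via hbase.HBaseTestingUtil and the test then constructing a second region server. A minimal sketch of driving such a minicluster from a test is below, assuming HBaseTestingUtil exposes the same startMiniCluster/getAdmin/shutdownMiniCluster methods as the older HBaseTestingUtility; the method names are an assumption based on that older API, not something read from this log.)

    import org.apache.hadoop.hbase.HBaseTestingUtil;   // class name appears in the log above
    import org.apache.hadoop.hbase.client.Admin;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster();                        // ZK, HDFS, master and one region server
        try (Admin admin = util.getAdmin()) {
          System.out.println("tables visible: " + admin.listTableNames().length);
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
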
2024-11-24T16:51:54,640 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42939-0x1014ae429160002, quorum=127.0.0.1:63785, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T16:51:54,641 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42939-0x1014ae429160002, quorum=127.0.0.1:63785, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T16:51:54,642 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42939 2024-11-24T16:51:54,644 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42939 2024-11-24T16:51:54,645 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42939 2024-11-24T16:51:54,645 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42939 2024-11-24T16:51:54,646 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42939 2024-11-24T16:51:54,647 INFO [RS:1;e335232f7491:42939 {}] regionserver.HRegionServer(746): ClusterId : 7a9bc5c4-4cf9-4227-91b5-31e8262d47de 2024-11-24T16:51:54,647 DEBUG [RS:1;e335232f7491:42939 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T16:51:54,649 DEBUG [RS:1;e335232f7491:42939 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T16:51:54,649 DEBUG [RS:1;e335232f7491:42939 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T16:51:54,651 DEBUG [RS:1;e335232f7491:42939 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T16:51:54,652 DEBUG [RS:1;e335232f7491:42939 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d02f4f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e335232f7491/172.17.0.2:0 2024-11-24T16:51:54,664 DEBUG [RS:1;e335232f7491:42939 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;e335232f7491:42939 2024-11-24T16:51:54,664 INFO [RS:1;e335232f7491:42939 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T16:51:54,664 INFO [RS:1;e335232f7491:42939 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T16:51:54,664 DEBUG [RS:1;e335232f7491:42939 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T16:51:54,665 INFO [RS:1;e335232f7491:42939 {}] regionserver.HRegionServer(2659): reportForDuty to master=e335232f7491,43593,1732467113624 with port=42939, startcode=1732467114627 2024-11-24T16:51:54,666 DEBUG [RS:1;e335232f7491:42939 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T16:51:54,668 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35731, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T16:51:54,668 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43593 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e335232f7491,42939,1732467114627 2024-11-24T16:51:54,668 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43593 {}] master.ServerManager(517): Registering regionserver=e335232f7491,42939,1732467114627 2024-11-24T16:51:54,670 DEBUG [RS:1;e335232f7491:42939 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8 2024-11-24T16:51:54,670 DEBUG [RS:1;e335232f7491:42939 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37129 2024-11-24T16:51:54,670 DEBUG [RS:1;e335232f7491:42939 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T16:51:54,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T16:51:54,672 DEBUG [RS:1;e335232f7491:42939 {}] zookeeper.ZKUtil(111): regionserver:42939-0x1014ae429160002, quorum=127.0.0.1:63785, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e335232f7491,42939,1732467114627 2024-11-24T16:51:54,672 WARN [RS:1;e335232f7491:42939 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T16:51:54,672 INFO [RS:1;e335232f7491:42939 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:51:54,673 DEBUG [RS:1;e335232f7491:42939 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627 2024-11-24T16:51:54,673 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e335232f7491,42939,1732467114627] 2024-11-24T16:51:54,679 INFO [RS:1;e335232f7491:42939 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T16:51:54,680 INFO [RS:1;e335232f7491:42939 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T16:51:54,681 INFO [RS:1;e335232f7491:42939 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T16:51:54,682 INFO [RS:1;e335232f7491:42939 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
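(The PressureAwareCompactionThroughputController entry above reports a 100 MB/s upper bound, a 50 MB/s lower bound and a 60000 ms tuning period, which are the controller's configurable limits. The sketch below sets the matching values programmatically; the configuration key names are quoted from memory and should be treated as assumptions rather than values printed in this log.)

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key names; the values mirror the bounds reported in the log above.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60_000);
        System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
      }
    }
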
2024-11-24T16:51:54,684 INFO [RS:1;e335232f7491:42939 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T16:51:54,685 INFO [RS:1;e335232f7491:42939 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T16:51:54,685 INFO [RS:1;e335232f7491:42939 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:54,685 DEBUG [RS:1;e335232f7491:42939 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:54,685 DEBUG [RS:1;e335232f7491:42939 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:54,685 DEBUG [RS:1;e335232f7491:42939 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:54,685 DEBUG [RS:1;e335232f7491:42939 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:54,685 DEBUG [RS:1;e335232f7491:42939 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:54,685 DEBUG [RS:1;e335232f7491:42939 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e335232f7491:0, corePoolSize=2, maxPoolSize=2 2024-11-24T16:51:54,685 DEBUG [RS:1;e335232f7491:42939 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:54,685 DEBUG [RS:1;e335232f7491:42939 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:54,686 DEBUG [RS:1;e335232f7491:42939 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:54,686 DEBUG [RS:1;e335232f7491:42939 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:54,686 DEBUG [RS:1;e335232f7491:42939 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:54,686 DEBUG [RS:1;e335232f7491:42939 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:51:54,686 DEBUG [RS:1;e335232f7491:42939 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e335232f7491:0, corePoolSize=3, maxPoolSize=3 2024-11-24T16:51:54,686 DEBUG [RS:1;e335232f7491:42939 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0, corePoolSize=3, maxPoolSize=3 2024-11-24T16:51:54,687 INFO [RS:1;e335232f7491:42939 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T16:51:54,688 INFO [RS:1;e335232f7491:42939 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:54,688 INFO [RS:1;e335232f7491:42939 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:54,688 INFO [RS:1;e335232f7491:42939 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:54,688 INFO [RS:1;e335232f7491:42939 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:54,688 INFO [RS:1;e335232f7491:42939 {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,42939,1732467114627-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T16:51:54,708 INFO [RS:1;e335232f7491:42939 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T16:51:54,709 INFO [RS:1;e335232f7491:42939 {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,42939,1732467114627-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:54,709 INFO [RS:1;e335232f7491:42939 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:54,709 INFO [RS:1;e335232f7491:42939 {}] regionserver.Replication(171): e335232f7491,42939,1732467114627 started 2024-11-24T16:51:54,723 INFO [RS:1;e335232f7491:42939 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:51:54,723 INFO [RS:1;e335232f7491:42939 {}] regionserver.HRegionServer(1482): Serving as e335232f7491,42939,1732467114627, RpcServer on e335232f7491/172.17.0.2:42939, sessionid=0x1014ae429160002 2024-11-24T16:51:54,723 DEBUG [RS:1;e335232f7491:42939 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T16:51:54,723 DEBUG [RS:1;e335232f7491:42939 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e335232f7491,42939,1732467114627 2024-11-24T16:51:54,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;e335232f7491:42939,5,FailOnTimeoutGroup] 2024-11-24T16:51:54,723 DEBUG [RS:1;e335232f7491:42939 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e335232f7491,42939,1732467114627' 2024-11-24T16:51:54,723 DEBUG [RS:1;e335232f7491:42939 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T16:51:54,723 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-24T16:51:54,724 DEBUG [RS:1;e335232f7491:42939 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T16:51:54,724 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T16:51:54,724 DEBUG [RS:1;e335232f7491:42939 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T16:51:54,724 DEBUG [RS:1;e335232f7491:42939 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T16:51:54,724 DEBUG [RS:1;e335232f7491:42939 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
e335232f7491,42939,1732467114627 2024-11-24T16:51:54,724 DEBUG [RS:1;e335232f7491:42939 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e335232f7491,42939,1732467114627' 2024-11-24T16:51:54,724 DEBUG [RS:1;e335232f7491:42939 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T16:51:54,725 DEBUG [RS:1;e335232f7491:42939 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T16:51:54,725 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is e335232f7491,43593,1732467113624 2024-11-24T16:51:54,725 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@547d4f62 2024-11-24T16:51:54,725 DEBUG [RS:1;e335232f7491:42939 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T16:51:54,725 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T16:51:54,725 INFO [RS:1;e335232f7491:42939 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T16:51:54,725 INFO [RS:1;e335232f7491:42939 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T16:51:54,727 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45350, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T16:51:54,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43593 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T16:51:54,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43593 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-24T16:51:54,728 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43593 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T16:51:54,729 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T16:51:54,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43593 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T16:51:54,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:51:54,731 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T16:51:54,731 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:54,731 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43593 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-24T16:51:54,733 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T16:51:54,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43593 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T16:51:54,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741835_1011 (size=393) 2024-11-24T16:51:54,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38531 is added to blk_1073741835_1011 (size=393) 2024-11-24T16:51:54,746 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d2af8ce2e6847c2e4a4c860497aaae56, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8 2024-11-24T16:51:54,754 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:51:54,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:51:54,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:51:54,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38531 is added to blk_1073741836_1012 (size=76) 2024-11-24T16:51:54,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741836_1012 (size=76) 2024-11-24T16:51:54,758 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:51:54,758 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing d2af8ce2e6847c2e4a4c860497aaae56, disabling compactions & flushes 2024-11-24T16:51:54,758 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56. 2024-11-24T16:51:54,759 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56. 2024-11-24T16:51:54,759 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56. after waiting 0 ms 2024-11-24T16:51:54,759 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56. 2024-11-24T16:51:54,759 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56. 2024-11-24T16:51:54,759 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for d2af8ce2e6847c2e4a4c860497aaae56: Waiting for close lock at 1732467114758Disabling compacts and flushes for region at 1732467114758Disabling writes for close at 1732467114759 (+1 ms)Writing region close event to WAL at 1732467114759Closed at 1732467114759 2024-11-24T16:51:54,761 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T16:51:54,761 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732467114761"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732467114761"}]},"ts":"1732467114761"} 2024-11-24T16:51:54,764 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
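(The master entries above record a create-table request for 'TestLogRolling-testLogRollOnDatanodeDeath' with a single 'info' family, and warn that MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) are unusually small; the tiny sizes are what force frequent rolls and flushes in this test. A minimal sketch of issuing an equivalent request through the public Admin API is below; the Admin instance is assumed to come from Connection#getAdmin() as in the earlier sketches.)

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      static void createTestTable(Admin admin) throws java.io.IOException {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)
                .build())
            .setMaxFileSize(786432L)        // matches the MAX_FILESIZE warning above
            .setMemStoreFlushSize(8192L)    // matches the MEMSTORE_FLUSHSIZE warning above
            .build();
        admin.createTable(td);              // produces the CreateTableProcedure seen in the log
      }
    }
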
2024-11-24T16:51:54,764 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-24T16:51:54,765 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T16:51:54,765 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732467114765"}]},"ts":"1732467114765"} 2024-11-24T16:51:54,767 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-24T16:51:54,768 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d2af8ce2e6847c2e4a4c860497aaae56, ASSIGN}] 2024-11-24T16:51:54,769 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d2af8ce2e6847c2e4a4c860497aaae56, ASSIGN 2024-11-24T16:51:54,770 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d2af8ce2e6847c2e4a4c860497aaae56, ASSIGN; state=OFFLINE, location=e335232f7491,41569,1732467113673; forceNewPlan=false, retain=false 2024-11-24T16:51:54,827 INFO [RS:1;e335232f7491:42939 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C42939%2C1732467114627, suffix=, logDir=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627, archiveDir=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/oldWALs, maxLogs=32 2024-11-24T16:51:54,829 INFO [RS:1;e335232f7491:42939 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C42939%2C1732467114627.1732467114828 2024-11-24T16:51:54,835 INFO [RS:1;e335232f7491:42939 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 2024-11-24T16:51:54,835 DEBUG [RS:1;e335232f7491:42939 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39177:39177),(127.0.0.1/127.0.0.1:35007:35007)] 2024-11-24T16:51:54,921 INFO [e335232f7491:43593 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
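(The WAL configuration entry above shows blocksize=256 MB, rollsize=128 MB and maxLogs=32 for the new region server; the roll size is the block size times the roll multiplier. The sketch below reproduces those numbers, assuming the usual key names hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier and hbase.regionserver.maxlogs, which are quoted from memory rather than from this log.)

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key names; values reproduce blocksize=256 MB and rollsize=128 MB from the log.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        conf.setInt("hbase.regionserver.maxlogs", 32);
        long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
            * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
        System.out.println("roll size = " + rollSize + " bytes"); // 134217728, i.e. 128 MB
      }
    }
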
2024-11-24T16:51:54,921 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d2af8ce2e6847c2e4a4c860497aaae56, regionState=OPENING, regionLocation=e335232f7491,41569,1732467113673 2024-11-24T16:51:54,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d2af8ce2e6847c2e4a4c860497aaae56, ASSIGN because future has completed 2024-11-24T16:51:54,925 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d2af8ce2e6847c2e4a4c860497aaae56, server=e335232f7491,41569,1732467113673}] 2024-11-24T16:51:55,083 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56. 2024-11-24T16:51:55,083 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d2af8ce2e6847c2e4a4c860497aaae56, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56.', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:51:55,084 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:51:55,084 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:51:55,084 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:51:55,084 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:51:55,087 INFO [StoreOpener-d2af8ce2e6847c2e4a4c860497aaae56-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:51:55,089 INFO [StoreOpener-d2af8ce2e6847c2e4a4c860497aaae56-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2af8ce2e6847c2e4a4c860497aaae56 columnFamilyName info 2024-11-24T16:51:55,089 DEBUG [StoreOpener-d2af8ce2e6847c2e4a4c860497aaae56-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:51:55,090 INFO [StoreOpener-d2af8ce2e6847c2e4a4c860497aaae56-1 {}] regionserver.HStore(327): Store=d2af8ce2e6847c2e4a4c860497aaae56/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:51:55,090 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:51:55,091 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:51:55,091 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:51:55,092 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:51:55,092 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:51:55,094 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:51:55,096 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:51:55,097 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d2af8ce2e6847c2e4a4c860497aaae56; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=764375, jitterRate=-0.02804705500602722}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T16:51:55,097 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:51:55,098 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d2af8ce2e6847c2e4a4c860497aaae56: Running coprocessor pre-open hook at 1732467115084Writing region info on filesystem at 1732467115084Initializing all the Stores at 1732467115085 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467115085Cleaning up temporary data from old regions at 1732467115092 (+7 ms)Running coprocessor post-open hooks at 1732467115097 (+5 ms)Region opened successfully at 1732467115098 (+1 ms) 2024-11-24T16:51:55,100 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56., pid=6, masterSystemTime=1732467115078 2024-11-24T16:51:55,103 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56. 2024-11-24T16:51:55,103 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56. 2024-11-24T16:51:55,104 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d2af8ce2e6847c2e4a4c860497aaae56, regionState=OPEN, openSeqNum=2, regionLocation=e335232f7491,41569,1732467113673 2024-11-24T16:51:55,107 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d2af8ce2e6847c2e4a4c860497aaae56, server=e335232f7491,41569,1732467113673 because future has completed 2024-11-24T16:51:55,111 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T16:51:55,111 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d2af8ce2e6847c2e4a4c860497aaae56, server=e335232f7491,41569,1732467113673 in 183 msec 2024-11-24T16:51:55,115 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T16:51:55,115 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d2af8ce2e6847c2e4a4c860497aaae56, ASSIGN in 343 msec 2024-11-24T16:51:55,116 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T16:51:55,116 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732467115116"}]},"ts":"1732467115116"} 2024-11-24T16:51:55,118 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-24T16:51:55,119 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T16:51:55,122 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 392 msec 2024-11-24T16:52:00,002 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T16:52:00,003 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:00,024 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:00,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:00,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:00,041 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-24T16:52:04,301 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T16:52:04,301 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-24T16:52:04,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43593 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T16:52:04,792 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-24T16:52:04,792 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-24T16:52:04,795 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T16:52:04,795 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56. 2024-11-24T16:52:04,813 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:52:04,816 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:52:04,817 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:52:04,817 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:52:04,817 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:52:04,818 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@36d0b5ff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:52:04,819 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51f59516{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:52:04,943 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3c5438f9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/java.io.tmpdir/jetty-localhost-38965-hadoop-hdfs-3_4_1-tests_jar-_-any-7776976173279211660/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:04,944 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2e7a8425{HTTP/1.1, (http/1.1)}{localhost:38965} 2024-11-24T16:52:04,944 INFO [Time-limited test {}] server.Server(415): Started @115906ms 2024-11-24T16:52:04,945 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T16:52:04,986 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:52:04,989 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:52:04,991 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:52:04,991 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:52:04,991 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:52:04,992 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@712f5f14{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:52:04,992 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3afd309b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:52:05,081 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data5/current/BP-481434126-172.17.0.2-1732467112819/current, will proceed with Du for space computation calculation, 2024-11-24T16:52:05,085 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data6/current/BP-481434126-172.17.0.2-1732467112819/current, will proceed with Du for space computation calculation, 2024-11-24T16:52:05,125 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T16:52:05,128 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xba6dd035c87d6998 with lease ID 0x212d005473bac038: Processing first storage report for DS-11ec1284-1e3b-4161-a323-43766c51e957 from datanode DatanodeRegistration(127.0.0.1:45771, datanodeUuid=ce539d68-62df-4116-9256-b813dcb10f36, infoPort=34805, infoSecurePort=0, ipcPort=33185, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819) 2024-11-24T16:52:05,128 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xba6dd035c87d6998 with lease ID 0x212d005473bac038: from storage DS-11ec1284-1e3b-4161-a323-43766c51e957 node DatanodeRegistration(127.0.0.1:45771, datanodeUuid=ce539d68-62df-4116-9256-b813dcb10f36, infoPort=34805, infoSecurePort=0, ipcPort=33185, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:05,128 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xba6dd035c87d6998 with lease ID 0x212d005473bac038: Processing first storage report for DS-808e889f-c763-4beb-8b90-7cd7a3596052 from datanode DatanodeRegistration(127.0.0.1:45771, datanodeUuid=ce539d68-62df-4116-9256-b813dcb10f36, infoPort=34805, infoSecurePort=0, ipcPort=33185, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819) 2024-11-24T16:52:05,128 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xba6dd035c87d6998 with lease ID 0x212d005473bac038: from storage DS-808e889f-c763-4beb-8b90-7cd7a3596052 node DatanodeRegistration(127.0.0.1:45771, datanodeUuid=ce539d68-62df-4116-9256-b813dcb10f36, infoPort=34805, infoSecurePort=0, ipcPort=33185, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:05,135 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39835cdd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/java.io.tmpdir/jetty-localhost-42709-hadoop-hdfs-3_4_1-tests_jar-_-any-16840739832526228600/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:05,136 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@12b19204{HTTP/1.1, (http/1.1)}{localhost:42709} 2024-11-24T16:52:05,136 INFO [Time-limited test {}] server.Server(415): Started @116098ms 2024-11-24T16:52:05,138 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T16:52:05,221 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:52:05,226 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:52:05,245 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:52:05,245 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:52:05,245 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:52:05,249 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68004957{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:52:05,249 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78e445ac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:52:05,293 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7/current/BP-481434126-172.17.0.2-1732467112819/current, will proceed with Du for space computation calculation, 2024-11-24T16:52:05,293 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8/current/BP-481434126-172.17.0.2-1732467112819/current, will proceed with Du for space computation calculation, 2024-11-24T16:52:05,339 WARN [Thread-843 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T16:52:05,347 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7c4db33cee34c550 with lease ID 0x212d005473bac039: Processing first storage report for DS-55665e5c-2237-4673-8c26-7ddd0c72a44c from datanode DatanodeRegistration(127.0.0.1:43629, datanodeUuid=fb451156-f481-4fd3-8d9d-eaa0f33405e6, infoPort=40239, infoSecurePort=0, ipcPort=42983, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819) 2024-11-24T16:52:05,347 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7c4db33cee34c550 with lease ID 0x212d005473bac039: from storage DS-55665e5c-2237-4673-8c26-7ddd0c72a44c node DatanodeRegistration(127.0.0.1:43629, datanodeUuid=fb451156-f481-4fd3-8d9d-eaa0f33405e6, infoPort=40239, infoSecurePort=0, ipcPort=42983, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:05,348 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7c4db33cee34c550 with lease ID 0x212d005473bac039: Processing first storage report for DS-cd21ca09-bd9a-4d24-b112-4e34173e28f3 from datanode DatanodeRegistration(127.0.0.1:43629, datanodeUuid=fb451156-f481-4fd3-8d9d-eaa0f33405e6, infoPort=40239, infoSecurePort=0, ipcPort=42983, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819) 2024-11-24T16:52:05,348 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7c4db33cee34c550 with lease ID 0x212d005473bac039: from storage DS-cd21ca09-bd9a-4d24-b112-4e34173e28f3 node DatanodeRegistration(127.0.0.1:43629, datanodeUuid=fb451156-f481-4fd3-8d9d-eaa0f33405e6, infoPort=40239, infoSecurePort=0, ipcPort=42983, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:05,401 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ea37f0d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/java.io.tmpdir/jetty-localhost-33145-hadoop-hdfs-3_4_1-tests_jar-_-any-11516396701796085737/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:05,401 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15010086{HTTP/1.1, (http/1.1)}{localhost:33145} 2024-11-24T16:52:05,401 INFO [Time-limited test {}] server.Server(415): Started @116363ms 2024-11-24T16:52:05,402 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T16:52:05,509 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data9/current/BP-481434126-172.17.0.2-1732467112819/current, will proceed with Du for space computation calculation, 2024-11-24T16:52:05,509 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data10/current/BP-481434126-172.17.0.2-1732467112819/current, will proceed with Du for space computation calculation, 2024-11-24T16:52:05,535 WARN [Thread-878 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T16:52:05,538 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x55f7100598566ce1 with lease ID 0x212d005473bac03a: Processing first storage report for DS-f9df768e-7e35-43ff-bf26-7eeb71dea001 from datanode DatanodeRegistration(127.0.0.1:35581, datanodeUuid=a148d418-9972-478b-8b7e-d81fdbef960b, infoPort=45681, infoSecurePort=0, ipcPort=45919, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819) 2024-11-24T16:52:05,538 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x55f7100598566ce1 with lease ID 0x212d005473bac03a: from storage DS-f9df768e-7e35-43ff-bf26-7eeb71dea001 node DatanodeRegistration(127.0.0.1:35581, datanodeUuid=a148d418-9972-478b-8b7e-d81fdbef960b, infoPort=45681, infoSecurePort=0, ipcPort=45919, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:05,538 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x55f7100598566ce1 with lease ID 0x212d005473bac03a: Processing first storage report for DS-d7a56bb4-90a4-450a-bf8a-bed8c9e32577 from datanode DatanodeRegistration(127.0.0.1:35581, datanodeUuid=a148d418-9972-478b-8b7e-d81fdbef960b, infoPort=45681, infoSecurePort=0, ipcPort=45919, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819) 2024-11-24T16:52:05,538 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x55f7100598566ce1 with lease ID 0x212d005473bac03a: from storage DS-d7a56bb4-90a4-450a-bf8a-bed8c9e32577 node DatanodeRegistration(127.0.0.1:35581, datanodeUuid=a148d418-9972-478b-8b7e-d81fdbef960b, infoPort=45681, infoSecurePort=0, ipcPort=45919, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:05,630 WARN [ResponseProcessor for block BP-481434126-172.17.0.2-1732467112819:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-481434126-172.17.0.2-1732467112819:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:05,630 WARN [ResponseProcessor for block BP-481434126-172.17.0.2-1732467112819:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-481434126-172.17.0.2-1732467112819:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:05,631 WARN [ResponseProcessor for block BP-481434126-172.17.0.2-1732467112819:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-481434126-172.17.0.2-1732467112819:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-481434126-172.17.0.2-1732467112819:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:05,631 WARN [ResponseProcessor for block BP-481434126-172.17.0.2-1732467112819:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-481434126-172.17.0.2-1732467112819:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-481434126-172.17.0.2-1732467112819:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:05,631 WARN [DataStreamer for file /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/WALs/e335232f7491,43593,1732467113624/e335232f7491%2C43593%2C1732467113624.1732467113785 block BP-481434126-172.17.0.2-1732467112819:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK], DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]) is bad. 2024-11-24T16:52:05,631 WARN [DataStreamer for file /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta block BP-481434126-172.17.0.2-1732467112819:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK], DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]) is bad. 
2024-11-24T16:52:05,632 WARN [DataStreamer for file /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467114073 block BP-481434126-172.17.0.2-1732467112819:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK], DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]) is bad. 2024-11-24T16:52:05,632 WARN [DataStreamer for file /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 block BP-481434126-172.17.0.2-1732467112819:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK], DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]) is bad. 2024-11-24T16:52:05,631 WARN [PacketResponder: BP-481434126-172.17.0.2-1732467112819:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36379] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:05,632 WARN [PacketResponder: BP-481434126-172.17.0.2-1732467112819:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36379] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:05,633 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-618061464_22 at /127.0.0.1:35376 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36379:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35376 dst: /127.0.0.1:36379 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:05,633 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:58420 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38531:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58420 dst: /127.0.0.1:38531 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:05,633 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@21d5e4af{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:05,632 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:35406 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36379:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35406 dst: /127.0.0.1:36379 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:05,632 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-618061464_22 at /127.0.0.1:58388 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38531:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58388 dst: /127.0.0.1:38531 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:05,632 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-678646167_22 at /127.0.0.1:58452 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:38531:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58452 dst: /127.0.0.1:38531 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:05,633 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-678646167_22 at /127.0.0.1:35442 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:36379:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35442 dst: /127.0.0.1:36379 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:05,634 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@145c0180{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:52:05,634 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:52:05,634 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:58424 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38531:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58424 dst: /127.0.0.1:38531 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:05,634 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7720beab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:52:05,634 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69bbaec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/hadoop.log.dir/,STOPPED} 2024-11-24T16:52:05,634 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:35408 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36379:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35408 dst: /127.0.0.1:36379 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:05,635 WARN [BP-481434126-172.17.0.2-1732467112819 heartbeating to localhost/127.0.0.1:37129 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:52:05,635 WARN [BP-481434126-172.17.0.2-1732467112819 heartbeating to localhost/127.0.0.1:37129 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-481434126-172.17.0.2-1732467112819 (Datanode Uuid 61c9693b-f575-4f3d-a37d-37fe8ba7ee69) service to localhost/127.0.0.1:37129 2024-11-24T16:52:05,636 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data3/current/BP-481434126-172.17.0.2-1732467112819 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:05,636 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:52:05,636 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data4/current/BP-481434126-172.17.0.2-1732467112819 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:05,637 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T16:52:05,637 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:52:05,640 WARN [DataStreamer for file /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/WALs/e335232f7491,43593,1732467113624/e335232f7491%2C43593%2C1732467113624.1732467113785 block BP-481434126-172.17.0.2-1732467112819:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:05,642 WARN [DataStreamer for file /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 block BP-481434126-172.17.0.2-1732467112819:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:05,642 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@ab5393f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:05,642 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47f82e76{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:52:05,642 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:52:05,642 WARN [DataStreamer for file /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta block BP-481434126-172.17.0.2-1732467112819:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:05,642 WARN [DataStreamer for file /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467114073 block BP-481434126-172.17.0.2-1732467112819:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:05,643 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30e7c448{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:52:05,643 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@257cf4bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/hadoop.log.dir/,STOPPED} 2024-11-24T16:52:05,645 WARN [BP-481434126-172.17.0.2-1732467112819 heartbeating to localhost/127.0.0.1:37129 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:52:05,645 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T16:52:05,645 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:52:05,645 WARN [BP-481434126-172.17.0.2-1732467112819 heartbeating to localhost/127.0.0.1:37129 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-481434126-172.17.0.2-1732467112819 (Datanode Uuid 9108b226-f41c-473c-8782-8726a21395d3) service to localhost/127.0.0.1:37129 2024-11-24T16:52:05,646 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data1/current/BP-481434126-172.17.0.2-1732467112819 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:05,646 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data2/current/BP-481434126-172.17.0.2-1732467112819 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:05,646 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:52:05,650 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56., hostname=e335232f7491,41569,1732467113673, seqNum=2] 2024-11-24T16:52:05,651 ERROR [FSHLog-0-hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8-prefix:e335232f7491,41569,1732467113673 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:05,651 WARN [FSHLog-0-hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8-prefix:e335232f7491,41569,1732467113673 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:05,652 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:05,652 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e335232f7491%2C41569%2C1732467113673:(num 1732467114073) roll requested 2024-11-24T16:52:05,652 INFO [regionserver/e335232f7491:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C41569%2C1732467113673.1732467125652 2024-11-24T16:52:05,655 WARN [Thread-900 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:05,655 WARN [Thread-900 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK], DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]) is bad. 2024-11-24T16:52:05,655 WARN [Thread-900 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741838_1018 2024-11-24T16:52:05,657 WARN [Thread-900 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK] 2024-11-24T16:52:05,660 WARN [Thread-900 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1019 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:05,660 WARN [Thread-900 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK], DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]) is bad. 2024-11-24T16:52:05,661 WARN [Thread-900 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741839_1019 2024-11-24T16:52:05,661 WARN [Thread-900 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK] 2024-11-24T16:52:05,665 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:05,665 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:05,666 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:05,666 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:05,666 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:05,666 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467114073 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467125652 2024-11-24T16:52:05,666 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:05,667 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:05,667 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-24T16:52:05,668 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-24T16:52:05,668 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467114073 2024-11-24T16:52:05,668 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34805:34805),(127.0.0.1/127.0.0.1:45681:45681)] 2024-11-24T16:52:05,668 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467114073 is not closed yet, will try archiving it next time 2024-11-24T16:52:05,670 WARN [IPC Server handler 4 on default port 37129 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467114073 has not been closed. Lease recovery is in progress. 
RecoveryId = 1021 for block blk_1073741833_1009 2024-11-24T16:52:05,674 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467114073 after 4ms 2024-11-24T16:52:05,764 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:06,687 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:07,669 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:07,670 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467125652 2024-11-24T16:52:07,670 WARN [ResponseProcessor for block BP-481434126-172.17.0.2-1732467112819:blk_1073741840_1020 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-481434126-172.17.0.2-1732467112819:blk_1073741840_1020 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:07,671 WARN [DataStreamer for file /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467125652 block BP-481434126-172.17.0.2-1732467112819:blk_1073741840_1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741840_1020 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK], DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]) is bad. 2024-11-24T16:52:07,671 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:49032 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:45771:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49032 dst: /127.0.0.1:45771 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:07,672 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:51230 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:35581:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51230 dst: /127.0.0.1:35581 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:07,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3c5438f9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:07,673 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2e7a8425{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:52:07,673 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:52:07,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51f59516{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:52:07,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@36d0b5ff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/hadoop.log.dir/,STOPPED} 2024-11-24T16:52:07,675 WARN [BP-481434126-172.17.0.2-1732467112819 heartbeating to localhost/127.0.0.1:37129 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:52:07,675 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T16:52:07,675 WARN [BP-481434126-172.17.0.2-1732467112819 heartbeating to localhost/127.0.0.1:37129 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-481434126-172.17.0.2-1732467112819 (Datanode Uuid ce539d68-62df-4116-9256-b813dcb10f36) service to localhost/127.0.0.1:37129 2024-11-24T16:52:07,675 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:52:07,676 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data5/current/BP-481434126-172.17.0.2-1732467112819 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:07,676 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data6/current/BP-481434126-172.17.0.2-1732467112819 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:07,676 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:52:07,764 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:08,687 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:09,670 WARN [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]] 2024-11-24T16:52:09,670 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:09,670 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e335232f7491%2C41569%2C1732467113673:(num 1732467125652) roll requested 2024-11-24T16:52:09,670 INFO [regionserver/e335232f7491:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C41569%2C1732467113673.1732467129670 2024-11-24T16:52:09,674 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467114073 after 4006ms 2024-11-24T16:52:09,676 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:09,676 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:09,677 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:09,677 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:09,677 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:09,677 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467125652 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467129670 2024-11-24T16:52:09,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35581 is added to blk_1073741840_1022 (size=2431) 2024-11-24T16:52:09,681 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T16:52:09,686 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40239:40239),(127.0.0.1/127.0.0.1:45681:45681)] 2024-11-24T16:52:09,686 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467114073 is not closed yet, will try archiving it next time 2024-11-24T16:52:09,686 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467125652 is not closed yet, will try archiving it next time 2024-11-24T16:52:09,765 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:10,081 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467114073 is not closed yet, will try archiving it next time 2024-11-24T16:52:10,688 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:11,556 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4c91f805[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35581, datanodeUuid=a148d418-9972-478b-8b7e-d81fdbef960b, infoPort=45681, infoSecurePort=0, ipcPort=45919, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819):Failed to transfer BP-481434126-172.17.0.2-1732467112819:blk_1073741840_1022 to 127.0.0.1:38531 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:11,686 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:11,687 WARN [ResponseProcessor for block BP-481434126-172.17.0.2-1732467112819:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-481434126-172.17.0.2-1732467112819:blk_1073741841_1023 java.io.IOException: Bad response ERROR for BP-481434126-172.17.0.2-1732467112819:blk_1073741841_1023 from datanode DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:11,688 WARN [DataStreamer for file /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467129670 block BP-481434126-172.17.0.2-1732467112819:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]) is bad. 2024-11-24T16:52:11,688 WARN [PacketResponder: BP-481434126-172.17.0.2-1732467112819:blk_1073741841_1023, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35581] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:11,689 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:59338 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59338 dst: /127.0.0.1:43629 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:11,689 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:51248 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:35581:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51248 dst: /127.0.0.1:35581 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:11,690 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ea37f0d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:11,691 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15010086{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:52:11,691 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:52:11,691 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78e445ac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:52:11,691 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68004957{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/hadoop.log.dir/,STOPPED} 2024-11-24T16:52:11,693 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T16:52:11,693 WARN [BP-481434126-172.17.0.2-1732467112819 heartbeating to localhost/127.0.0.1:37129 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:52:11,693 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:52:11,693 WARN [BP-481434126-172.17.0.2-1732467112819 heartbeating to localhost/127.0.0.1:37129 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-481434126-172.17.0.2-1732467112819 (Datanode Uuid a148d418-9972-478b-8b7e-d81fdbef960b) service to localhost/127.0.0.1:37129 2024-11-24T16:52:11,694 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data9/current/BP-481434126-172.17.0.2-1732467112819 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:11,694 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data10/current/BP-481434126-172.17.0.2-1732467112819 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:11,694 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:52:11,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41569 {}] regionserver.HRegion(8855): Flush requested on d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:52:11,704 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d2af8ce2e6847c2e4a4c860497aaae56 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T16:52:11,729 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/ab74675505484d12ae73f3ae373e0e81 is 1080, key is row0002/info:/1732467127678/Put/seqid=0 2024-11-24T16:52:11,731 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:11,731 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK], DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]) is bad. 2024-11-24T16:52:11,731 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741842_1025 2024-11-24T16:52:11,732 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK] 2024-11-24T16:52:11,734 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:11,734 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK], DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]) is bad. 2024-11-24T16:52:11,734 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741843_1026 2024-11-24T16:52:11,734 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK] 2024-11-24T16:52:11,736 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:11,736 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK], DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]) is bad. 2024-11-24T16:52:11,736 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741844_1027 2024-11-24T16:52:11,737 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK] 2024-11-24T16:52:11,739 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45771 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:11,739 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46638 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741845_1028] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741845_1028 to mirror 127.0.0.1:45771 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:11,739 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]) is bad. 2024-11-24T16:52:11,740 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741845_1028 2024-11-24T16:52:11,740 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46638 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741845_1028] {}] datanode.BlockReceiver(316): Block 1073741845 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T16:52:11,740 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46638 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741845_1028] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46638 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:11,741 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK] 2024-11-24T16:52:11,742 WARN [IPC Server handler 1 on default port 37129 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T16:52:11,742 WARN [IPC Server handler 1 on default port 37129 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T16:52:11,742 WARN [IPC Server handler 1 on default port 37129 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T16:52:11,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741846_1029 (size=10347) 2024-11-24T16:52:11,765 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:12,147 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/ab74675505484d12ae73f3ae373e0e81 2024-11-24T16:52:12,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/ab74675505484d12ae73f3ae373e0e81 as hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/ab74675505484d12ae73f3ae373e0e81 2024-11-24T16:52:12,166 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/ab74675505484d12ae73f3ae373e0e81, entries=5, sequenceid=11, filesize=10.1 K 2024-11-24T16:52:12,168 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for d2af8ce2e6847c2e4a4c860497aaae56 in 465ms, sequenceid=11, compaction requested=false 2024-11-24T16:52:12,168 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d2af8ce2e6847c2e4a4c860497aaae56: 2024-11-24T16:52:12,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41569 {}] regionserver.HRegion(8855): Flush requested on d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:52:12,343 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d2af8ce2e6847c2e4a4c860497aaae56 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-24T16:52:12,351 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/9b82f4ecf514456fbbff7d2710b9d9d4 is 1080, key is row0007/info:/1732467131705/Put/seqid=0 2024-11-24T16:52:12,353 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:12,353 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK], DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]) is bad. 2024-11-24T16:52:12,353 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741847_1030 2024-11-24T16:52:12,354 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK] 2024-11-24T16:52:12,359 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:12,360 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK], DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]) is bad. 2024-11-24T16:52:12,360 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741848_1031 2024-11-24T16:52:12,361 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK] 2024-11-24T16:52:12,365 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35581 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:12,364 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46664 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741849_1032 to mirror 127.0.0.1:35581 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:12,365 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]) is bad. 2024-11-24T16:52:12,365 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741849_1032 2024-11-24T16:52:12,365 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46664 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T16:52:12,365 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46664 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46664 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:12,366 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK] 2024-11-24T16:52:12,369 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38531 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:12,369 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46680 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741850_1033 to mirror 127.0.0.1:38531 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:12,369 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]) is bad. 2024-11-24T16:52:12,369 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741850_1033 2024-11-24T16:52:12,369 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46680 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T16:52:12,369 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46680 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46680 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:12,369 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK] 2024-11-24T16:52:12,370 WARN [IPC Server handler 4 on default port 37129 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T16:52:12,370 WARN [IPC Server handler 4 on default port 37129 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T16:52:12,371 WARN [IPC Server handler 4 on default port 37129 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T16:52:12,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741851_1034 (size=12506) 2024-11-24T16:52:12,688 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
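[editor's note] The BlockPlacementPolicyDefault warnings above suggest enabling DEBUG on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology to see why each datanode is rejected during replica placement. A minimal sketch of doing that programmatically with the Log4j 2 API follows; the class and method names (EnablePlacementDebug, enable) are hypothetical, and the call only affects the JVM it runs in, so for a standalone NameNode the equivalent entries would normally go in its log4j2 configuration instead.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    // Hypothetical helper: raises the two logger categories named in the
    // "Failed to place enough replicas" warning to DEBUG. It must run inside
    // the JVM whose placement decisions you want to trace (e.g. the
    // mini-cluster JVM of this test).
    public final class EnablePlacementDebug {
        public static void enable() {
            Configurator.setLevel(
                "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy",
                Level.DEBUG);
            Configurator.setLevel(
                "org.apache.hadoop.net.NetworkTopology",
                Level.DEBUG);
        }
    }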
2024-11-24T16:52:12,776 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/9b82f4ecf514456fbbff7d2710b9d9d4 2024-11-24T16:52:12,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/9b82f4ecf514456fbbff7d2710b9d9d4 as hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/9b82f4ecf514456fbbff7d2710b9d9d4 2024-11-24T16:52:12,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/9b82f4ecf514456fbbff7d2710b9d9d4, entries=7, sequenceid=24, filesize=12.2 K 2024-11-24T16:52:12,798 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for d2af8ce2e6847c2e4a4c860497aaae56 in 455ms, sequenceid=24, compaction requested=false 2024-11-24T16:52:12,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d2af8ce2e6847c2e4a4c860497aaae56: 2024-11-24T16:52:12,799 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-24T16:52:12,799 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:52:12,799 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/9b82f4ecf514456fbbff7d2710b9d9d4 because midkey is the same as first or last row 2024-11-24T16:52:13,686 WARN [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK]] 2024-11-24T16:52:13,686 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
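[editor's note] The FSHLog(529) warning above ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL") is the log roller reacting to the WAL pipeline dropping below its tolerable replica count. A rough sketch of the two knobs involved is below, assuming the property names hbase.regionserver.hlog.tolerable.lowreplication and hbase.regionserver.hlog.lowreplication.rolllimit that FSHLog is generally understood to read; the helper class and method names are made up for illustration, not taken from the test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Hypothetical illustration of the configuration the low-replication roll
    // reacts to. The property names are assumptions about the knobs FSHLog
    // consults; they are not verified against this test's source.
    public final class WalLowReplicationTuning {
        public static Configuration relaxedLowReplication() {
            Configuration conf = HBaseConfiguration.create();
            // Replica count the WAL pipeline may shrink to before a roll is requested.
            conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 1);
            // Upper bound on consecutive rolls triggered purely by low replication.
            conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
            return conf;
        }
    }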
2024-11-24T16:52:13,687 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e335232f7491%2C41569%2C1732467113673:(num 1732467129670) roll requested 2024-11-24T16:52:13,687 INFO [regionserver/e335232f7491:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C41569%2C1732467113673.1732467133687 2024-11-24T16:52:13,690 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:13,690 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK], DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]) is bad. 2024-11-24T16:52:13,690 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741852_1035 2024-11-24T16:52:13,691 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK] 2024-11-24T16:52:13,693 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:13,693 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK], DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]) is bad. 2024-11-24T16:52:13,693 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741853_1036 2024-11-24T16:52:13,694 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK] 2024-11-24T16:52:13,695 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:13,695 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK], DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]) is bad. 2024-11-24T16:52:13,695 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741854_1037 2024-11-24T16:52:13,696 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK] 2024-11-24T16:52:13,698 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38531 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:13,698 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46698 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741855_1038 to mirror 127.0.0.1:38531 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:13,698 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]) is bad. 2024-11-24T16:52:13,698 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741855_1038 2024-11-24T16:52:13,698 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46698 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T16:52:13,698 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46698 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46698 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:13,699 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK] 2024-11-24T16:52:13,699 WARN [IPC Server handler 0 on default port 37129 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T16:52:13,700 WARN [IPC Server handler 0 on default port 37129 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T16:52:13,700 WARN [IPC Server handler 0 on default port 37129 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T16:52:13,702 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:13,702 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:13,702 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:13,702 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:13,703 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:13,703 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467129670 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467133687 2024-11-24T16:52:13,704 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40239:40239)] 2024-11-24T16:52:13,704 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467114073 is not closed yet, will try archiving it next time 
2024-11-24T16:52:13,704 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467129670 is not closed yet, will try archiving it next time 2024-11-24T16:52:13,704 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467125652 to hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/oldWALs/e335232f7491%2C41569%2C1732467113673.1732467125652 2024-11-24T16:52:13,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741841_1024 (size=25992) 2024-11-24T16:52:13,765 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:13,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41569 {}] regionserver.HRegion(8855): Flush requested on d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:52:13,769 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d2af8ce2e6847c2e4a4c860497aaae56 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T16:52:13,776 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/5f070993497d4870bae6dee3d8c52bd2 is 1079, key is tmprow/info:/1732467133768/Put/seqid=0 2024-11-24T16:52:13,779 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35581 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:13,779 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46720 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741857_1040 to mirror 127.0.0.1:35581 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:13,780 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]) is bad. 2024-11-24T16:52:13,780 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741857_1040 2024-11-24T16:52:13,780 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46720 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T16:52:13,780 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46720 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46720 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:13,780 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK] 2024-11-24T16:52:13,782 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:13,782 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK], DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]) is bad. 2024-11-24T16:52:13,782 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741858_1041 2024-11-24T16:52:13,782 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK] 2024-11-24T16:52:13,785 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:13,785 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK], DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]) is bad. 2024-11-24T16:52:13,785 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741859_1042 2024-11-24T16:52:13,785 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK] 2024-11-24T16:52:13,787 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:13,787 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK], DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]) is bad. 
2024-11-24T16:52:13,787 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741860_1043 2024-11-24T16:52:13,788 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK] 2024-11-24T16:52:13,788 WARN [IPC Server handler 3 on default port 37129 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T16:52:13,788 WARN [IPC Server handler 3 on default port 37129 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T16:52:13,789 WARN [IPC Server handler 3 on default port 37129 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T16:52:13,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741861_1044 (size=6027) 2024-11-24T16:52:14,105 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467114073 is not closed yet, will try archiving it next time 2024-11-24T16:52:14,193 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/5f070993497d4870bae6dee3d8c52bd2 2024-11-24T16:52:14,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/5f070993497d4870bae6dee3d8c52bd2 as hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/5f070993497d4870bae6dee3d8c52bd2 2024-11-24T16:52:14,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/5f070993497d4870bae6dee3d8c52bd2, entries=1, sequenceid=34, filesize=5.9 K 2024-11-24T16:52:14,207 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): 
Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for d2af8ce2e6847c2e4a4c860497aaae56 in 438ms, sequenceid=34, compaction requested=true 2024-11-24T16:52:14,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d2af8ce2e6847c2e4a4c860497aaae56: 2024-11-24T16:52:14,207 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-24T16:52:14,207 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:52:14,207 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/9b82f4ecf514456fbbff7d2710b9d9d4 because midkey is the same as first or last row 2024-11-24T16:52:14,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2af8ce2e6847c2e4a4c860497aaae56:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T16:52:14,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:52:14,208 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T16:52:14,209 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T16:52:14,209 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.HStore(1541): d2af8ce2e6847c2e4a4c860497aaae56/info is initiating minor compaction (all files) 2024-11-24T16:52:14,209 INFO [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d2af8ce2e6847c2e4a4c860497aaae56/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56. 
2024-11-24T16:52:14,209 INFO [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/ab74675505484d12ae73f3ae373e0e81, hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/9b82f4ecf514456fbbff7d2710b9d9d4, hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/5f070993497d4870bae6dee3d8c52bd2] into tmpdir=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp, totalSize=28.2 K 2024-11-24T16:52:14,209 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] compactions.Compactor(225): Compacting ab74675505484d12ae73f3ae373e0e81, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732467127678 2024-11-24T16:52:14,210 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9b82f4ecf514456fbbff7d2710b9d9d4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732467131705 2024-11-24T16:52:14,210 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5f070993497d4870bae6dee3d8c52bd2, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732467133768 2024-11-24T16:52:14,223 INFO [RS:0;e335232f7491:41569-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2af8ce2e6847c2e4a4c860497aaae56#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T16:52:14,224 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/087a4ca4932d4045a1ccc709c9735877 is 1080, key is row0002/info:/1732467127678/Put/seqid=0 2024-11-24T16:52:14,226 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:14,226 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK], DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]) is bad. 2024-11-24T16:52:14,226 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741862_1045 2024-11-24T16:52:14,226 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK] 2024-11-24T16:52:14,229 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35581 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:14,229 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46746 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741863_1046 to mirror 127.0.0.1:35581 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:14,229 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]) is bad. 2024-11-24T16:52:14,229 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741863_1046 2024-11-24T16:52:14,229 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46746 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T16:52:14,229 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46746 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46746 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:14,230 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK] 2024-11-24T16:52:14,232 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45771 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:14,232 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46748 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741864_1047 to mirror 127.0.0.1:45771 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:14,232 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]) is bad. 2024-11-24T16:52:14,232 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741864_1047 2024-11-24T16:52:14,232 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46748 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T16:52:14,232 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46748 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46748 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:14,232 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK] 2024-11-24T16:52:14,235 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38531 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:14,235 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]) is bad. 2024-11-24T16:52:14,235 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741865_1048 2024-11-24T16:52:14,235 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46762 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741865_1048 to mirror 127.0.0.1:38531 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:14,235 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46762 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T16:52:14,235 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46762 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46762 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:14,235 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK] 2024-11-24T16:52:14,236 WARN [IPC Server handler 1 on default port 37129 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T16:52:14,236 WARN [IPC Server handler 1 on default port 37129 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T16:52:14,236 WARN [IPC Server handler 1 on default port 37129 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T16:52:14,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741866_1049 (size=17994) 2024-11-24T16:52:14,350 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1156af6d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43629, datanodeUuid=fb451156-f481-4fd3-8d9d-eaa0f33405e6, infoPort=40239, infoSecurePort=0, ipcPort=42983, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819):Failed to transfer BP-481434126-172.17.0.2-1732467112819:blk_1073741846_1029 to 127.0.0.1:45771 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:14,350 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1f914337[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43629, datanodeUuid=fb451156-f481-4fd3-8d9d-eaa0f33405e6, infoPort=40239, infoSecurePort=0, ipcPort=42983, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819):Failed to transfer BP-481434126-172.17.0.2-1732467112819:blk_1073741851_1034 to 127.0.0.1:36379 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:14,646 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/087a4ca4932d4045a1ccc709c9735877 as hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/087a4ca4932d4045a1ccc709c9735877 2024-11-24T16:52:14,653 INFO [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d2af8ce2e6847c2e4a4c860497aaae56/info of d2af8ce2e6847c2e4a4c860497aaae56 into 087a4ca4932d4045a1ccc709c9735877(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-24T16:52:14,653 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d2af8ce2e6847c2e4a4c860497aaae56: 2024-11-24T16:52:14,653 INFO [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56., storeName=d2af8ce2e6847c2e4a4c860497aaae56/info, priority=13, startTime=1732467134207; duration=0sec 2024-11-24T16:52:14,654 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-24T16:52:14,654 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:52:14,654 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/087a4ca4932d4045a1ccc709c9735877 because midkey is the same as first or last row 2024-11-24T16:52:14,654 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-24T16:52:14,654 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:52:14,654 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/087a4ca4932d4045a1ccc709c9735877 because midkey is the same as first or last row 2024-11-24T16:52:14,654 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-24T16:52:14,654 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:52:14,654 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/087a4ca4932d4045a1ccc709c9735877 because midkey is the same as first or last row 2024-11-24T16:52:14,654 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:52:14,654 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2af8ce2e6847c2e4a4c860497aaae56:info 2024-11-24T16:52:14,688 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:15,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41569 {}] regionserver.HRegion(8855): Flush requested on d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:52:15,189 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d2af8ce2e6847c2e4a4c860497aaae56 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T16:52:15,193 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/9ec07ee1ae5445c08a42c96d137d12fe is 1079, key is tmprow/info:/1732467135187/Put/seqid=0 2024-11-24T16:52:15,195 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:15,195 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK], DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]) is bad. 
2024-11-24T16:52:15,195 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741867_1050 2024-11-24T16:52:15,196 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK] 2024-11-24T16:52:15,198 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46784 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741868_1051] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741868_1051 to mirror 127.0.0.1:35581 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:15,198 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35581 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:15,198 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46784 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741868_1051] {}] datanode.BlockReceiver(316): Block 1073741868 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-24T16:52:15,198 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]) is bad. 2024-11-24T16:52:15,198 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741868_1051 2024-11-24T16:52:15,198 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46784 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741868_1051] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46784 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:15,199 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK] 2024-11-24T16:52:15,201 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38531 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:15,201 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46790 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741869_1052] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741869_1052 to mirror 127.0.0.1:38531 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:15,201 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]) is bad. 2024-11-24T16:52:15,201 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741869_1052 2024-11-24T16:52:15,201 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46790 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741869_1052] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T16:52:15,201 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46790 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741869_1052] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46790 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:15,201 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK] 2024-11-24T16:52:15,203 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36379 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:15,203 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46806 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741870_1053 to mirror 127.0.0.1:36379 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:15,204 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]) is bad. 2024-11-24T16:52:15,204 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741870_1053 2024-11-24T16:52:15,204 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46806 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T16:52:15,204 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46806 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46806 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:15,204 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK] 2024-11-24T16:52:15,205 WARN [IPC Server handler 0 on default port 37129 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T16:52:15,205 WARN [IPC Server handler 0 on default port 37129 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T16:52:15,205 WARN [IPC Server handler 0 on default port 37129 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T16:52:15,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741871_1054 (size=6027) 2024-11-24T16:52:15,349 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1156af6d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43629, datanodeUuid=fb451156-f481-4fd3-8d9d-eaa0f33405e6, infoPort=40239, infoSecurePort=0, ipcPort=42983, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819):Failed to transfer BP-481434126-172.17.0.2-1732467112819:blk_1073741861_1044 to 127.0.0.1:38531 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:15,349 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1f914337[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43629, datanodeUuid=fb451156-f481-4fd3-8d9d-eaa0f33405e6, infoPort=40239, infoSecurePort=0, ipcPort=42983, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819):Failed to transfer BP-481434126-172.17.0.2-1732467112819:blk_1073741841_1024 to 127.0.0.1:45771 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:15,608 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/9ec07ee1ae5445c08a42c96d137d12fe 2024-11-24T16:52:15,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/9ec07ee1ae5445c08a42c96d137d12fe as hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/9ec07ee1ae5445c08a42c96d137d12fe 2024-11-24T16:52:15,620 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/9ec07ee1ae5445c08a42c96d137d12fe, entries=1, sequenceid=45, filesize=5.9 K 2024-11-24T16:52:15,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for d2af8ce2e6847c2e4a4c860497aaae56 in 432ms, sequenceid=45, compaction requested=false 2024-11-24T16:52:15,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d2af8ce2e6847c2e4a4c860497aaae56: 2024-11-24T16:52:15,622 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-24T16:52:15,622 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:52:15,622 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/087a4ca4932d4045a1ccc709c9735877 because midkey is the same as first or last row 2024-11-24T16:52:15,704 WARN [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK]] 2024-11-24T16:52:15,705 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:15,705 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e335232f7491%2C41569%2C1732467113673:(num 1732467133687) roll requested 2024-11-24T16:52:15,705 INFO [regionserver/e335232f7491:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C41569%2C1732467113673.1732467135705 2024-11-24T16:52:15,708 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:15,708 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK], DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]) is bad. 
2024-11-24T16:52:15,708 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741872_1055 2024-11-24T16:52:15,708 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK] 2024-11-24T16:52:15,710 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:15,710 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK], DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]) is bad. 2024-11-24T16:52:15,710 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741873_1056 2024-11-24T16:52:15,710 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK] 2024-11-24T16:52:15,711 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:15,711 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK], DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]) is bad. 2024-11-24T16:52:15,711 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741874_1057 2024-11-24T16:52:15,712 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK] 2024-11-24T16:52:15,713 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:15,713 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK], DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]) is bad. 
2024-11-24T16:52:15,713 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741875_1058 2024-11-24T16:52:15,713 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK] 2024-11-24T16:52:15,714 WARN [IPC Server handler 0 on default port 37129 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T16:52:15,714 WARN [IPC Server handler 0 on default port 37129 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T16:52:15,714 WARN [IPC Server handler 0 on default port 37129 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T16:52:15,717 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:15,717 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:15,717 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:15,717 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:15,717 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:15,717 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467133687 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467135705 2024-11-24T16:52:15,718 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40239:40239)] 2024-11-24T16:52:15,718 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467114073 is not closed yet, will try archiving it next time 2024-11-24T16:52:15,718 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467133687 is not closed yet, will try archiving it next time 2024-11-24T16:52:15,718 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467129670 to hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/oldWALs/e335232f7491%2C41569%2C1732467113673.1732467129670 2024-11-24T16:52:15,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741856_1039 (size=13591) 2024-11-24T16:52:15,766 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:16,120 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467114073 is not closed yet, will try archiving it next time 2024-11-24T16:52:16,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41569 {}] regionserver.HRegion(8855): Flush requested on d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:52:16,606 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d2af8ce2e6847c2e4a4c860497aaae56 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T16:52:16,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/995f3460f0a04794be3784737fc6d532 is 1079, key is tmprow/info:/1732467136605/Put/seqid=0 2024-11-24T16:52:16,614 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36379 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:16,614 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46830 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741877_1060] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741877_1060 to mirror 127.0.0.1:36379 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:16,614 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]) is bad. 2024-11-24T16:52:16,614 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741877_1060 2024-11-24T16:52:16,614 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46830 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741877_1060] {}] datanode.BlockReceiver(316): Block 1073741877 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T16:52:16,614 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46830 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741877_1060] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46830 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:16,614 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK] 2024-11-24T16:52:16,616 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:16,616 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK], DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]) is bad. 2024-11-24T16:52:16,616 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741878_1061 2024-11-24T16:52:16,616 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK] 2024-11-24T16:52:16,617 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:16,618 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK], DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]) is bad. 2024-11-24T16:52:16,618 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741879_1062 2024-11-24T16:52:16,618 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK] 2024-11-24T16:52:16,619 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:16,620 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK], DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]) is bad. 
2024-11-24T16:52:16,620 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741880_1063 2024-11-24T16:52:16,620 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK] 2024-11-24T16:52:16,621 WARN [IPC Server handler 0 on default port 37129 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T16:52:16,621 WARN [IPC Server handler 0 on default port 37129 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T16:52:16,621 WARN [IPC Server handler 0 on default port 37129 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T16:52:16,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741881_1064 (size=6027) 2024-11-24T16:52:16,689 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:17,025 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/995f3460f0a04794be3784737fc6d532 2024-11-24T16:52:17,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/995f3460f0a04794be3784737fc6d532 as hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/995f3460f0a04794be3784737fc6d532 2024-11-24T16:52:17,037 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/995f3460f0a04794be3784737fc6d532, entries=1, sequenceid=55, filesize=5.9 K 2024-11-24T16:52:17,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for d2af8ce2e6847c2e4a4c860497aaae56 in 432ms, sequenceid=55, compaction requested=true 2024-11-24T16:52:17,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d2af8ce2e6847c2e4a4c860497aaae56: 2024-11-24T16:52:17,038 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-24T16:52:17,038 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:52:17,039 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/087a4ca4932d4045a1ccc709c9735877 because midkey is the same as first or last row 2024-11-24T16:52:17,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2af8ce2e6847c2e4a4c860497aaae56:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T16:52:17,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:52:17,039 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T16:52:17,040 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T16:52:17,040 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.HStore(1541): d2af8ce2e6847c2e4a4c860497aaae56/info is initiating minor compaction (all files) 2024-11-24T16:52:17,040 INFO [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
d2af8ce2e6847c2e4a4c860497aaae56/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56. 2024-11-24T16:52:17,040 INFO [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/087a4ca4932d4045a1ccc709c9735877, hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/9ec07ee1ae5445c08a42c96d137d12fe, hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/995f3460f0a04794be3784737fc6d532] into tmpdir=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp, totalSize=29.3 K 2024-11-24T16:52:17,041 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] compactions.Compactor(225): Compacting 087a4ca4932d4045a1ccc709c9735877, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732467127678 2024-11-24T16:52:17,041 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9ec07ee1ae5445c08a42c96d137d12fe, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732467135187 2024-11-24T16:52:17,041 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] compactions.Compactor(225): Compacting 995f3460f0a04794be3784737fc6d532, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732467136605 2024-11-24T16:52:17,058 INFO [RS:0;e335232f7491:41569-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2af8ce2e6847c2e4a4c860497aaae56#info#compaction#24 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T16:52:17,059 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/43ec630295da49aa909deb2454472457 is 1080, key is row0002/info:/1732467127678/Put/seqid=0 2024-11-24T16:52:17,062 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45771 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:17,062 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46838 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741882_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741882_1065 to mirror 127.0.0.1:45771 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:17,062 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]) is bad. 2024-11-24T16:52:17,062 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741882_1065 2024-11-24T16:52:17,062 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46838 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741882_1065] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T16:52:17,062 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46838 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741882_1065] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46838 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:17,063 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK] 2024-11-24T16:52:17,065 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38531 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:17,065 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46852 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741883_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741883_1066 to mirror 127.0.0.1:38531 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:17,065 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]) is bad. 2024-11-24T16:52:17,065 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741883_1066 2024-11-24T16:52:17,065 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46852 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741883_1066] {}] datanode.BlockReceiver(316): Block 1073741883 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T16:52:17,065 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46852 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741883_1066] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46852 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:17,066 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK] 2024-11-24T16:52:17,068 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35581 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:17,068 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46858 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741884_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741884_1067 to mirror 127.0.0.1:35581 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:17,068 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]) is bad. 2024-11-24T16:52:17,068 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741884_1067 2024-11-24T16:52:17,068 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46858 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741884_1067] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T16:52:17,068 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46858 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741884_1067] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46858 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:17,069 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK] 2024-11-24T16:52:17,071 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36379 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:17,070 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46866 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741885_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741885_1068 to mirror 127.0.0.1:36379 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:17,071 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]) is bad. 2024-11-24T16:52:17,071 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741885_1068 2024-11-24T16:52:17,071 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46866 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741885_1068] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T16:52:17,071 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:46866 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741885_1068] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46866 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:17,071 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36379,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK] 2024-11-24T16:52:17,072 WARN [IPC Server handler 4 on default port 37129 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T16:52:17,072 WARN [IPC Server handler 4 on default port 37129 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T16:52:17,072 WARN [IPC Server handler 4 on default port 37129 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T16:52:17,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741886_1069 (size=18097) 2024-11-24T16:52:17,349 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1f914337[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43629, datanodeUuid=fb451156-f481-4fd3-8d9d-eaa0f33405e6, infoPort=40239, infoSecurePort=0, ipcPort=42983, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819):Failed to transfer BP-481434126-172.17.0.2-1732467112819:blk_1073741866_1049 to 127.0.0.1:45771 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:17,349 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1156af6d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43629, datanodeUuid=fb451156-f481-4fd3-8d9d-eaa0f33405e6, infoPort=40239, infoSecurePort=0, ipcPort=42983, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819):Failed to transfer BP-481434126-172.17.0.2-1732467112819:blk_1073741871_1054 to 127.0.0.1:36379 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:17,483 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/43ec630295da49aa909deb2454472457 as hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/43ec630295da49aa909deb2454472457 2024-11-24T16:52:17,489 INFO [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d2af8ce2e6847c2e4a4c860497aaae56/info of d2af8ce2e6847c2e4a4c860497aaae56 into 43ec630295da49aa909deb2454472457(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-24T16:52:17,490 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d2af8ce2e6847c2e4a4c860497aaae56: 2024-11-24T16:52:17,490 INFO [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56., storeName=d2af8ce2e6847c2e4a4c860497aaae56/info, priority=13, startTime=1732467137039; duration=0sec 2024-11-24T16:52:17,490 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-24T16:52:17,490 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:52:17,490 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/43ec630295da49aa909deb2454472457 because midkey is the same as first or last row 2024-11-24T16:52:17,490 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-24T16:52:17,490 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:52:17,490 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/43ec630295da49aa909deb2454472457 because midkey is the same as first or last row 2024-11-24T16:52:17,490 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-24T16:52:17,490 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:52:17,490 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/43ec630295da49aa909deb2454472457 because midkey is the same as first or last row 2024-11-24T16:52:17,490 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:52:17,490 DEBUG [RS:0;e335232f7491:41569-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2af8ce2e6847c2e4a4c860497aaae56:info 2024-11-24T16:52:17,718 WARN [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 
2024-11-24T16:52:17,718 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:17,766 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:17,828 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:52:17,831 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:52:17,837 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:52:17,837 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:52:17,837 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:52:17,841 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@719d00c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:52:17,841 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6dc86a20{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:52:17,956 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@402253c7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/java.io.tmpdir/jetty-localhost-37657-hadoop-hdfs-3_4_1-tests_jar-_-any-2264482082648155636/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:17,957 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5dd0b56c{HTTP/1.1, (http/1.1)}{localhost:37657} 2024-11-24T16:52:17,957 INFO [Time-limited test {}] server.Server(415): Started @128919ms 2024-11-24T16:52:17,959 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T16:52:18,055 WARN [Thread-992 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T16:52:18,062 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd64fa5f40d260a12 with lease ID 0x212d005473bac03b: from storage DS-911a699d-718c-45e8-8b9b-d4334c5c0d84 node DatanodeRegistration(127.0.0.1:44831, datanodeUuid=61c9693b-f575-4f3d-a37d-37fe8ba7ee69, infoPort=46801, infoSecurePort=0, ipcPort=33553, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:18,062 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd64fa5f40d260a12 with lease ID 0x212d005473bac03b: from storage DS-12fc6047-b1bc-4de9-9aaf-042ece08439d node DatanodeRegistration(127.0.0.1:44831, datanodeUuid=61c9693b-f575-4f3d-a37d-37fe8ba7ee69, infoPort=46801, infoSecurePort=0, ipcPort=33553, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:18,350 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1f914337[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43629, datanodeUuid=fb451156-f481-4fd3-8d9d-eaa0f33405e6, infoPort=40239, infoSecurePort=0, ipcPort=42983, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819):Failed to transfer BP-481434126-172.17.0.2-1732467112819:blk_1073741881_1064 to 127.0.0.1:38531 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:18,350 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1156af6d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43629, datanodeUuid=fb451156-f481-4fd3-8d9d-eaa0f33405e6, infoPort=40239, infoSecurePort=0, ipcPort=42983, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819):Failed to transfer BP-481434126-172.17.0.2-1732467112819:blk_1073741856_1039 to 127.0.0.1:45771 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:18,689 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:19,719 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:19,766 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:20,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44831 is added to blk_1073741886_1069 (size=18097) 2024-11-24T16:52:20,689 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:21,719 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:21,767 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:22,690 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:23,604 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T16:52:23,720 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:23,767 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:23,871 ERROR [FSHLog-0-hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData-prefix:e335232f7491,43593,1732467113624 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:23,871 WARN [FSHLog-0-hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData-prefix:e335232f7491,43593,1732467113624 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:23,871 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog e335232f7491%2C43593%2C1732467113624:(num 1732467113785) roll requested 2024-11-24T16:52:23,872 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C43593%2C1732467113624.1732467143871 2024-11-24T16:52:23,875 WARN [Thread-1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:23,875 WARN [Thread-1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK], DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]) is bad. 2024-11-24T16:52:23,875 WARN [Thread-1013 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741887_1070 2024-11-24T16:52:23,876 WARN [Thread-1013 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK] 2024-11-24T16:52:23,878 WARN [Thread-1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45771 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:23,878 WARN [Thread-1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]) is bad. 
2024-11-24T16:52:23,878 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-618061464_22 at /127.0.0.1:53910 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741888_1071] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741888_1071 to mirror 127.0.0.1:45771 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:23,878 WARN [Thread-1013 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741888_1071 2024-11-24T16:52:23,878 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-618061464_22 at /127.0.0.1:53910 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741888_1071] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T16:52:23,879 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-618061464_22 at /127.0.0.1:53910 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741888_1071] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53910 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:23,879 WARN [Thread-1013 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK] 2024-11-24T16:52:23,880 WARN [Thread-1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:23,880 WARN [Thread-1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK], DatanodeInfoWithStorage[127.0.0.1:44831,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]) is bad. 2024-11-24T16:52:23,880 WARN [Thread-1013 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741889_1072 2024-11-24T16:52:23,881 WARN [Thread-1013 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK] 2024-11-24T16:52:23,884 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:23,884 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:23,884 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:23,885 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:23,885 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:23,885 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/WALs/e335232f7491,43593,1732467113624/e335232f7491%2C43593%2C1732467113624.1732467113785 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/WALs/e335232f7491,43593,1732467113624/e335232f7491%2C43593%2C1732467113624.1732467143871 2024-11-24T16:52:23,885 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:23,886 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:23,886 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/WALs/e335232f7491,43593,1732467113624/e335232f7491%2C43593%2C1732467113624.1732467113785 2024-11-24T16:52:23,886 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46801:46801),(127.0.0.1/127.0.0.1:40239:40239)] 2024-11-24T16:52:23,886 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/WALs/e335232f7491,43593,1732467113624/e335232f7491%2C43593%2C1732467113624.1732467113785 is not closed yet, will try archiving it next time 2024-11-24T16:52:23,886 WARN [IPC Server handler 2 on default port 37129 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/WALs/e335232f7491,43593,1732467113624/e335232f7491%2C43593%2C1732467113624.1732467113785 has not been closed. Lease recovery is in progress. RecoveryId = 1074 for block blk_1073741830_1006 2024-11-24T16:52:23,887 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/WALs/e335232f7491,43593,1732467113624/e335232f7491%2C43593%2C1732467113624.1732467113785 after 1ms 2024-11-24T16:52:24,690 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:25,720 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:26,690 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:27,720 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:27,888 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/WALs/e335232f7491,43593,1732467113624/e335232f7491%2C43593%2C1732467113624.1732467113785 after 4002ms 2024-11-24T16:52:28,078 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@22a46c2d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-481434126-172.17.0.2-1732467112819:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:38531,null,null]) java.net.ConnectException: Call From e335232f7491/172.17.0.2 to localhost:44515 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] 
at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-24T16:52:28,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44831 is added to blk_1073741833_1021 (size=455) 2024-11-24T16:52:28,691 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:28,692 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467114073 to hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/oldWALs/e335232f7491%2C41569%2C1732467113673.1732467114073 2024-11-24T16:52:28,693 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467133687 to hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/oldWALs/e335232f7491%2C41569%2C1732467113673.1732467133687 2024-11-24T16:52:29,721 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:30,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741833_1021 (size=455) 2024-11-24T16:52:30,691 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:31,586 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C41569%2C1732467113673.1732467151585 2024-11-24T16:52:31,588 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,588 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK], DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]) is bad. 2024-11-24T16:52:31,589 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741891_1075 2024-11-24T16:52:31,589 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK] 2024-11-24T16:52:31,591 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35581 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:31,591 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-618061464_22 at /127.0.0.1:36658 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741892_1076] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741892_1076 to mirror 127.0.0.1:35581 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:31,591 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]) is bad. 2024-11-24T16:52:31,591 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741892_1076 2024-11-24T16:52:31,591 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-618061464_22 at /127.0.0.1:36658 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741892_1076] {}] datanode.BlockReceiver(316): Block 1073741892 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T16:52:31,591 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-618061464_22 at /127.0.0.1:36658 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741892_1076] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36658 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:31,592 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK] 2024-11-24T16:52:31,593 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,593 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK], DatanodeInfoWithStorage[127.0.0.1:44831,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]) is bad. 
2024-11-24T16:52:31,593 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741893_1077 2024-11-24T16:52:31,594 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK] 2024-11-24T16:52:31,598 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:31,598 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:31,598 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:31,598 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:31,598 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:31,598 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467135705 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467151585 2024-11-24T16:52:31,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741876_1059 (size=12911) 2024-11-24T16:52:31,602 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46801:46801),(127.0.0.1/127.0.0.1:40239:40239)] 2024-11-24T16:52:31,602 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467135705 is not closed yet, will try archiving it next time 2024-11-24T16:52:31,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41569 {}] regionserver.HRegion(8855): Flush requested on d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:52:31,607 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d2af8ce2e6847c2e4a4c860497aaae56 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T16:52:31,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/3124e866fdc34d6cbbc641e372890e58 is 1080, key is row0013/info:/1732467151604/Put/seqid=0 2024-11-24T16:52:31,614 WARN [Thread-1032 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1079 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45771 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:31,614 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:36672 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741895_1079] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741895_1079 to mirror 127.0.0.1:45771 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:31,615 WARN [Thread-1032 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741895_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]) is bad. 2024-11-24T16:52:31,615 WARN [Thread-1032 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741895_1079 2024-11-24T16:52:31,615 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:36672 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741895_1079] {}] datanode.BlockReceiver(316): Block 1073741895 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T16:52:31,615 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:36672 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741895_1079] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36672 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:31,615 WARN [Thread-1032 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK] 2024-11-24T16:52:31,617 WARN [Thread-1032 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1080 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,617 WARN [Thread-1032 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741896_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK], DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]) is bad. 
2024-11-24T16:52:31,617 WARN [Thread-1032 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741896_1080 2024-11-24T16:52:31,617 WARN [Thread-1032 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK] 2024-11-24T16:52:31,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44831 is added to blk_1073741897_1081 (size=8190) 2024-11-24T16:52:31,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741897_1081 (size=8190) 2024-11-24T16:52:31,623 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/3124e866fdc34d6cbbc641e372890e58 2024-11-24T16:52:31,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/3124e866fdc34d6cbbc641e372890e58 as hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/3124e866fdc34d6cbbc641e372890e58 2024-11-24T16:52:31,635 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/3124e866fdc34d6cbbc641e372890e58, entries=3, sequenceid=66, filesize=8.0 K 2024-11-24T16:52:31,636 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for d2af8ce2e6847c2e4a4c860497aaae56 in 29ms, sequenceid=66, compaction requested=false 2024-11-24T16:52:31,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d2af8ce2e6847c2e4a4c860497aaae56: 2024-11-24T16:52:31,636 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-24T16:52:31,636 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:52:31,636 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/43ec630295da49aa909deb2454472457 because midkey is the same as first or last row 2024-11-24T16:52:31,721 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-24T16:52:31,721 INFO [regionserver/e335232f7491:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T16:52:31,824 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T16:52:31,825 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:52:31,825 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:52:31,825 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:52:31,825 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T16:52:31,825 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T16:52:31,825 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1874116964, stopped=false 2024-11-24T16:52:31,825 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e335232f7491,43593,1732467113624 2024-11-24T16:52:31,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42939-0x1014ae429160002, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T16:52:31,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T16:52:31,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T16:52:31,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42939-0x1014ae429160002, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:52:31,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:52:31,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:52:31,827 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T16:52:31,827 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:52:31,828 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T16:52:31,828 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42939-0x1014ae429160002, quorum=127.0.0.1:63785, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:52:31,828 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:52:31,828 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:52:31,828 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:52:31,828 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e335232f7491,41569,1732467113673' ***** 2024-11-24T16:52:31,828 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T16:52:31,828 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e335232f7491,42939,1732467114627' ***** 2024-11-24T16:52:31,828 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T16:52:31,828 INFO [RS:1;e335232f7491:42939 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T16:52:31,828 INFO [RS:0;e335232f7491:41569 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T16:52:31,829 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T16:52:31,829 INFO [RS:1;e335232f7491:42939 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T16:52:31,829 INFO [RS:0;e335232f7491:41569 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T16:52:31,829 INFO [RS:1;e335232f7491:42939 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T16:52:31,829 INFO [RS:0;e335232f7491:41569 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T16:52:31,829 INFO [RS:1;e335232f7491:42939 {}] regionserver.HRegionServer(959): stopping server e335232f7491,42939,1732467114627 2024-11-24T16:52:31,829 INFO [RS:1;e335232f7491:42939 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T16:52:31,829 INFO [RS:0;e335232f7491:41569 {}] regionserver.HRegionServer(3091): Received CLOSE for d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:52:31,829 INFO [RS:1;e335232f7491:42939 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;e335232f7491:42939. 
2024-11-24T16:52:31,829 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T16:52:31,829 DEBUG [RS:1;e335232f7491:42939 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:52:31,829 DEBUG [RS:1;e335232f7491:42939 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:52:31,829 INFO [RS:1;e335232f7491:42939 {}] regionserver.HRegionServer(976): stopping server e335232f7491,42939,1732467114627; all regions closed. 2024-11-24T16:52:31,829 INFO [RS:0;e335232f7491:41569 {}] regionserver.HRegionServer(959): stopping server e335232f7491,41569,1732467113673 2024-11-24T16:52:31,829 INFO [RS:0;e335232f7491:41569 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T16:52:31,829 INFO [RS:0;e335232f7491:41569 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e335232f7491:41569. 
2024-11-24T16:52:31,829 DEBUG [RS:0;e335232f7491:41569 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:52:31,829 DEBUG [RS:0;e335232f7491:41569 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:52:31,830 INFO [RS:0;e335232f7491:41569 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T16:52:31,830 INFO [RS:0;e335232f7491:41569 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T16:52:31,830 INFO [RS:0;e335232f7491:41569 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T16:52:31,830 INFO [RS:0;e335232f7491:41569 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T16:52:31,830 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d2af8ce2e6847c2e4a4c860497aaae56, disabling compactions & flushes 2024-11-24T16:52:31,830 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56. 2024-11-24T16:52:31,830 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56. 2024-11-24T16:52:31,830 INFO [RS:0;e335232f7491:41569 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T16:52:31,830 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56. after waiting 0 ms 2024-11-24T16:52:31,830 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56. 
2024-11-24T16:52:31,830 DEBUG [RS:0;e335232f7491:41569 {}] regionserver.HRegionServer(1325): Online Regions={d2af8ce2e6847c2e4a4c860497aaae56=TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56., 1588230740=hbase:meta,,1.1588230740} 2024-11-24T16:52:31,830 DEBUG [RS:0;e335232f7491:41569 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d2af8ce2e6847c2e4a4c860497aaae56 2024-11-24T16:52:31,830 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T16:52:31,830 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T16:52:31,830 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing d2af8ce2e6847c2e4a4c860497aaae56 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-24T16:52:31,830 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T16:52:31,830 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T16:52:31,830 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T16:52:31,830 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-24T16:52:31,830 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:31,830 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:31,831 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:31,831 ERROR [FSHLog-0-hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8-prefix:e335232f7491,41569,1732467113673.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,831 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:31,831 WARN [FSHLog-0-hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8-prefix:e335232f7491,41569,1732467113673.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,831 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:31,831 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e335232f7491%2C41569%2C1732467113673.meta:.meta(num 1732467114492) roll requested 2024-11-24T16:52:31,831 INFO [regionserver/e335232f7491:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C41569%2C1732467113673.meta.1732467151831.meta 2024-11-24T16:52:31,831 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,831 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,832 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 2024-11-24T16:52:31,832 WARN [IPC Server handler 0 on default port 37129 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 has not been closed. Lease recovery is in progress. 
RecoveryId = 1082 for block blk_1073741837_1013 2024-11-24T16:52:31,832 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 after 0ms 2024-11-24T16:52:31,834 WARN [Thread-1041 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,834 WARN [Thread-1041 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741898_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK], DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]) is bad. 2024-11-24T16:52:31,834 WARN [Thread-1041 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741898_1083 2024-11-24T16:52:31,835 WARN [Thread-1041 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK] 2024-11-24T16:52:31,836 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/bf5719f4416f4a9fab897516be52381e is 1080, key is row0015/info:/1732467151608/Put/seqid=0 2024-11-24T16:52:31,836 WARN [Thread-1041 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1084 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,836 WARN [Thread-1041 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741899_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK], DatanodeInfoWithStorage[127.0.0.1:44831,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]) is bad. 2024-11-24T16:52:31,836 WARN [Thread-1041 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741899_1084 2024-11-24T16:52:31,837 WARN [Thread-1041 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK] 2024-11-24T16:52:31,838 WARN [Thread-1041 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1086 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,838 WARN [Thread-1041 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741901_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK], DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]) is bad. 2024-11-24T16:52:31,838 WARN [Thread-1041 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741901_1086 2024-11-24T16:52:31,838 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1085 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35581 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:31,838 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:34752 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741900_1085] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data4]'}, localName='127.0.0.1:44831', datanodeUuid='61c9693b-f575-4f3d-a37d-37fe8ba7ee69', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741900_1085 to mirror 127.0.0.1:35581 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:31,839 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741900_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44831,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK], DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]) is bad. 2024-11-24T16:52:31,839 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741900_1085 2024-11-24T16:52:31,839 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:34752 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741900_1085] {}] datanode.BlockReceiver(316): Block 1073741900 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T16:52:31,839 WARN [Thread-1041 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK] 2024-11-24T16:52:31,839 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:34752 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741900_1085] {}] datanode.DataXceiver(331): 127.0.0.1:44831:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34752 dst: /127.0.0.1:44831 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:31,839 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK] 2024-11-24T16:52:31,840 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1088 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,840 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741903_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK], DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]) is bad. 2024-11-24T16:52:31,840 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741903_1088 2024-11-24T16:52:31,841 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK] 2024-11-24T16:52:31,843 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1089 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45771 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,843 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:34768 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741904_1089] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data4]'}, localName='127.0.0.1:44831', datanodeUuid='61c9693b-f575-4f3d-a37d-37fe8ba7ee69', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741904_1089 to mirror 127.0.0.1:45771 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:31,843 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741904_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44831,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK], DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]) is bad. 2024-11-24T16:52:31,843 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:34768 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741904_1089] {}] datanode.BlockReceiver(316): Block 1073741904 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T16:52:31,843 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741904_1089 2024-11-24T16:52:31,843 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:34768 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741904_1089] {}] datanode.DataXceiver(331): 127.0.0.1:44831:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34768 dst: /127.0.0.1:44831 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:31,843 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:31,844 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:31,844 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK] 2024-11-24T16:52:31,844 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:31,844 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:31,844 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:31,844 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467151831.meta 2024-11-24T16:52:31,848 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,848 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:31,849 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta 2024-11-24T16:52:31,849 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40239:40239),(127.0.0.1/127.0.0.1:46801:46801)] 2024-11-24T16:52:31,849 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta is not closed yet, will try archiving it next time 2024-11-24T16:52:31,849 WARN [IPC Server handler 1 on default port 37129 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta has not been closed. Lease recovery is in progress. RecoveryId = 1091 for block blk_1073741834_1010 2024-11-24T16:52:31,849 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta after 0ms 2024-11-24T16:52:31,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741905_1090 (size=14660) 2024-11-24T16:52:31,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44831 is added to blk_1073741905_1090 (size=14660) 2024-11-24T16:52:31,861 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/bf5719f4416f4a9fab897516be52381e 2024-11-24T16:52:31,867 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740/.tmp/info/d660725b8a90449bac325c32e6ac08fd is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56./info:regioninfo/1732467115104/Put/seqid=0 2024-11-24T16:52:31,868 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/.tmp/info/bf5719f4416f4a9fab897516be52381e as hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/bf5719f4416f4a9fab897516be52381e 2024-11-24T16:52:31,869 WARN [Thread-1052 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741906_1092 java.net.ConnectException: 
Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,869 WARN [Thread-1052 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741906_1092 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK], DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]) is bad. 2024-11-24T16:52:31,869 WARN [Thread-1052 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741906_1092 2024-11-24T16:52:31,870 WARN [Thread-1052 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK] 2024-11-24T16:52:31,871 WARN [Thread-1052 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741907_1093 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,871 WARN [Thread-1052 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741907_1093 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK], DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]) is bad. 
2024-11-24T16:52:31,871 WARN [Thread-1052 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741907_1093 2024-11-24T16:52:31,872 WARN [Thread-1052 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK] 2024-11-24T16:52:31,874 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/bf5719f4416f4a9fab897516be52381e, entries=9, sequenceid=78, filesize=14.3 K 2024-11-24T16:52:31,875 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for d2af8ce2e6847c2e4a4c860497aaae56 in 45ms, sequenceid=78, compaction requested=true 2024-11-24T16:52:31,876 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/ab74675505484d12ae73f3ae373e0e81, hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/9b82f4ecf514456fbbff7d2710b9d9d4, hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/087a4ca4932d4045a1ccc709c9735877, hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/5f070993497d4870bae6dee3d8c52bd2, hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/9ec07ee1ae5445c08a42c96d137d12fe, hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/995f3460f0a04794be3784737fc6d532] to archive 2024-11-24T16:52:31,877 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-24T16:52:31,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741908_1094 (size=7089) 2024-11-24T16:52:31,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44831 is added to blk_1073741908_1094 (size=7089) 2024-11-24T16:52:31,878 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740/.tmp/info/d660725b8a90449bac325c32e6ac08fd 2024-11-24T16:52:31,879 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/ab74675505484d12ae73f3ae373e0e81 to hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/ab74675505484d12ae73f3ae373e0e81 2024-11-24T16:52:31,881 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/9b82f4ecf514456fbbff7d2710b9d9d4 to hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/9b82f4ecf514456fbbff7d2710b9d9d4 2024-11-24T16:52:31,882 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/087a4ca4932d4045a1ccc709c9735877 to hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/087a4ca4932d4045a1ccc709c9735877 2024-11-24T16:52:31,884 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/5f070993497d4870bae6dee3d8c52bd2 to hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/5f070993497d4870bae6dee3d8c52bd2 2024-11-24T16:52:31,885 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/9ec07ee1ae5445c08a42c96d137d12fe to hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/9ec07ee1ae5445c08a42c96d137d12fe 2024-11-24T16:52:31,887 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/995f3460f0a04794be3784737fc6d532 to hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/info/995f3460f0a04794be3784737fc6d532 2024-11-24T16:52:31,887 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e335232f7491:43593 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-24T16:52:31,888 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [ab74675505484d12ae73f3ae373e0e81=10347, 9b82f4ecf514456fbbff7d2710b9d9d4=12506, 087a4ca4932d4045a1ccc709c9735877=17994, 5f070993497d4870bae6dee3d8c52bd2=6027, 9ec07ee1ae5445c08a42c96d137d12fe=6027, 995f3460f0a04794be3784737fc6d532=6027] 2024-11-24T16:52:31,896 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d2af8ce2e6847c2e4a4c860497aaae56/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-24T16:52:31,897 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56. 2024-11-24T16:52:31,897 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d2af8ce2e6847c2e4a4c860497aaae56: Waiting for close lock at 1732467151829Running coprocessor pre-close hooks at 1732467151829Disabling compacts and flushes for region at 1732467151829Disabling writes for close at 1732467151830 (+1 ms)Obtaining lock to block concurrent updates at 1732467151830Preparing flush snapshotting stores in d2af8ce2e6847c2e4a4c860497aaae56 at 1732467151830Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1732467151831 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56. at 1732467151832 (+1 ms)Flushing d2af8ce2e6847c2e4a4c860497aaae56/info: creating writer at 1732467151832Flushing d2af8ce2e6847c2e4a4c860497aaae56/info: appending metadata at 1732467151835 (+3 ms)Flushing d2af8ce2e6847c2e4a4c860497aaae56/info: closing flushed file at 1732467151835Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7444a035: reopening flushed file at 1732467151867 (+32 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for d2af8ce2e6847c2e4a4c860497aaae56 in 45ms, sequenceid=78, compaction requested=true at 1732467151875 (+8 ms)Writing region close event to WAL at 1732467151892 (+17 ms)Running coprocessor post-close hooks at 1732467151896 (+4 ms)Closed at 1732467151897 (+1 ms) 2024-11-24T16:52:31,897 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732467114728.d2af8ce2e6847c2e4a4c860497aaae56. 2024-11-24T16:52:31,903 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740/.tmp/ns/bf60fd0f6f47479ab23c6a1c53db0eab is 43, key is default/ns:d/1732467114536/Put/seqid=0 2024-11-24T16:52:31,905 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741909_1095 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,905 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741909_1095 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK], DatanodeInfoWithStorage[127.0.0.1:44831,DS-911a699d-718c-45e8-8b9b-d4334c5c0d84,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK]) is bad. 2024-11-24T16:52:31,905 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741909_1095 2024-11-24T16:52:31,906 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38531,DS-50a3ac05-fbd6-463c-ad45-2ac58d0f83b8,DISK] 2024-11-24T16:52:31,907 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741910_1096 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,907 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741910_1096 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK], DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK]) is bad. 
2024-11-24T16:52:31,907 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741910_1096 2024-11-24T16:52:31,908 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35581,DS-f9df768e-7e35-43ff-bf26-7eeb71dea001,DISK] 2024-11-24T16:52:31,909 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741911_1097 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45771 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:31,909 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:36722 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741911_1097] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8]'}, localName='127.0.0.1:43629', datanodeUuid='fb451156-f481-4fd3-8d9d-eaa0f33405e6', xmitsInProgress=0}:Exception transferring block BP-481434126-172.17.0.2-1732467112819:blk_1073741911_1097 to mirror 127.0.0.1:45771 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:31,910 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-481434126-172.17.0.2-1732467112819:blk_1073741911_1097 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43629,DS-55665e5c-2237-4673-8c26-7ddd0c72a44c,DISK], DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK]) is bad. 
2024-11-24T16:52:31,910 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-481434126-172.17.0.2-1732467112819:blk_1073741911_1097 2024-11-24T16:52:31,910 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:36722 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741911_1097] {}] datanode.BlockReceiver(316): Block 1073741911 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T16:52:31,910 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1573696118_22 at /127.0.0.1:36722 [Receiving block BP-481434126-172.17.0.2-1732467112819:blk_1073741911_1097] {}] datanode.DataXceiver(331): 127.0.0.1:43629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36722 dst: /127.0.0.1:43629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:31,910 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45771,DS-11ec1284-1e3b-4161-a323-43766c51e957,DISK] 2024-11-24T16:52:31,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44831 is added to blk_1073741912_1098 (size=5153) 2024-11-24T16:52:31,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741912_1098 (size=5153) 2024-11-24T16:52:31,920 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740/.tmp/ns/bf60fd0f6f47479ab23c6a1c53db0eab 2024-11-24T16:52:31,937 INFO [regionserver/e335232f7491:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T16:52:31,944 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740/.tmp/table/c835060297d646bdaaec7af669f80c2a is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732467115116/Put/seqid=0 2024-11-24T16:52:31,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741913_1099 (size=5424) 2024-11-24T16:52:31,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44831 is added to blk_1073741913_1099 (size=5424) 2024-11-24T16:52:31,950 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740/.tmp/table/c835060297d646bdaaec7af669f80c2a 2024-11-24T16:52:31,956 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740/.tmp/info/d660725b8a90449bac325c32e6ac08fd as hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740/info/d660725b8a90449bac325c32e6ac08fd 2024-11-24T16:52:31,962 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740/info/d660725b8a90449bac325c32e6ac08fd, entries=10, sequenceid=11, filesize=6.9 K 2024-11-24T16:52:31,963 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740/.tmp/ns/bf60fd0f6f47479ab23c6a1c53db0eab as hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740/ns/bf60fd0f6f47479ab23c6a1c53db0eab 2024-11-24T16:52:31,969 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740/ns/bf60fd0f6f47479ab23c6a1c53db0eab, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T16:52:31,970 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740/.tmp/table/c835060297d646bdaaec7af669f80c2a as hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740/table/c835060297d646bdaaec7af669f80c2a 2024-11-24T16:52:31,976 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740/table/c835060297d646bdaaec7af669f80c2a, entries=2, sequenceid=11, filesize=5.3 K 2024-11-24T16:52:31,977 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 147ms, sequenceid=11, compaction requested=false 2024-11-24T16:52:31,982 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T16:52:31,983 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T16:52:31,983 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T16:52:31,983 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732467151830Running coprocessor pre-close hooks at 1732467151830Disabling compacts and flushes for region at 1732467151830Disabling writes for close at 1732467151830Obtaining lock to block concurrent updates at 1732467151830Preparing flush snapshotting stores in 1588230740 at 1732467151830Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732467151831 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732467151850 (+19 ms)Flushing 1588230740/info: creating writer at 1732467151850Flushing 1588230740/info: appending metadata at 1732467151867 (+17 ms)Flushing 1588230740/info: closing flushed file at 1732467151867Flushing 1588230740/ns: creating writer at 1732467151885 (+18 ms)Flushing 1588230740/ns: appending metadata at 1732467151903 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1732467151903Flushing 1588230740/table: creating writer at 1732467151925 (+22 ms)Flushing 1588230740/table: appending metadata at 1732467151943 (+18 ms)Flushing 1588230740/table: closing flushed file at 1732467151943Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45f35962: reopening flushed file at 1732467151955 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4585476a: reopening flushed file at 1732467151962 (+7 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d50c6b8: reopening flushed file at 1732467151970 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 147ms, sequenceid=11, compaction requested=false at 1732467151977 (+7 ms)Writing region close event to WAL at 1732467151979 (+2 ms)Running coprocessor post-close hooks at 1732467151983 (+4 ms)Closed at 1732467151983 2024-11-24T16:52:31,983 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T16:52:32,000 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.1732467135705 to hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/oldWALs/e335232f7491%2C41569%2C1732467113673.1732467135705 2024-11-24T16:52:32,009 INFO [regionserver/e335232f7491:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T16:52:32,009 INFO [regionserver/e335232f7491:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T16:52:32,030 INFO [RS:0;e335232f7491:41569 {}] regionserver.HRegionServer(976): stopping server e335232f7491,41569,1732467113673; all regions closed. 2024-11-24T16:52:32,031 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:32,031 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:32,031 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:32,031 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:32,031 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:32,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44831 is added to blk_1073741902_1087 (size=825) 2024-11-24T16:52:32,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741902_1087 (size=825) 2024-11-24T16:52:32,355 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1156af6d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43629, datanodeUuid=fb451156-f481-4fd3-8d9d-eaa0f33405e6, infoPort=40239, infoSecurePort=0, ipcPort=42983, storageInfo=lv=-57;cid=testClusterID;nsid=2124040174;c=1732467112819):Failed to transfer BP-481434126-172.17.0.2-1732467112819:blk_1073741876_1059 to 127.0.0.1:38531 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:32,688 INFO [regionserver/e335232f7491:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T16:52:32,688 INFO [regionserver/e335232f7491:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T16:52:32,689 INFO [regionserver/e335232f7491:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T16:52:34,301 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T16:52:34,302 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T16:52:34,302 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T16:52:35,493 INFO [master/e335232f7491:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T16:52:35,493 INFO [master/e335232f7491:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T16:52:35,833 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 after 4001ms 2024-11-24T16:52:35,850 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta after 4001ms 2024-11-24T16:52:36,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741835_1011 (size=393) 2024-11-24T16:52:36,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741831_1007 (size=1321) 2024-11-24T16:52:36,832 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-24T16:52:36,834 DEBUG [RS:1;e335232f7491:42939 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/oldWALs 2024-11-24T16:52:36,834 INFO [RS:1;e335232f7491:42939 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e335232f7491%2C42939%2C1732467114627:(num 1732467114828) 2024-11-24T16:52:36,834 DEBUG [RS:1;e335232f7491:42939 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:52:36,834 INFO [RS:1;e335232f7491:42939 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T16:52:36,834 INFO [RS:1;e335232f7491:42939 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T16:52:36,834 INFO [RS:1;e335232f7491:42939 {}] hbase.ChoreService(370): Chore service for: regionserver/e335232f7491:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T16:52:36,834 INFO [RS:1;e335232f7491:42939 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T16:52:36,834 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T16:52:36,834 INFO [RS:1;e335232f7491:42939 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T16:52:36,834 INFO [RS:1;e335232f7491:42939 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T16:52:36,834 INFO [RS:1;e335232f7491:42939 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T16:52:36,835 INFO [RS:1;e335232f7491:42939 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42939 2024-11-24T16:52:36,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42939-0x1014ae429160002, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e335232f7491,42939,1732467114627 2024-11-24T16:52:36,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T16:52:36,836 INFO [RS:1;e335232f7491:42939 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T16:52:36,838 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e335232f7491,42939,1732467114627] 2024-11-24T16:52:36,839 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e335232f7491,42939,1732467114627 already deleted, retry=false 2024-11-24T16:52:36,840 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e335232f7491,42939,1732467114627 expired; onlineServers=1 2024-11-24T16:52:36,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:36,898 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:36,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:36,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:36,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:36,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:36,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:36,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:36,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:36,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42939-0x1014ae429160002, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:52:36,938 INFO [RS:1;e335232f7491:42939 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T16:52:36,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42939-0x1014ae429160002, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:52:36,938 INFO [RS:1;e335232f7491:42939 {}] regionserver.HRegionServer(1031): Exiting; stopping=e335232f7491,42939,1732467114627; zookeeper connection closed. 2024-11-24T16:52:36,938 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4016ffa2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4016ffa2 2024-11-24T16:52:37,032 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-24T16:52:37,035 DEBUG [RS:0;e335232f7491:41569 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/oldWALs 2024-11-24T16:52:37,035 INFO [RS:0;e335232f7491:41569 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e335232f7491%2C41569%2C1732467113673.meta:.meta(num 1732467151831) 2024-11-24T16:52:37,035 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:37,036 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:37,036 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:37,036 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:37,036 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:37,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741894_1078 (size=14682) 2024-11-24T16:52:37,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44831 is added to blk_1073741894_1078 (size=14682) 2024-11-24T16:52:37,040 DEBUG [RS:0;e335232f7491:41569 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/oldWALs 2024-11-24T16:52:37,040 INFO [RS:0;e335232f7491:41569 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e335232f7491%2C41569%2C1732467113673:(num 1732467151585) 2024-11-24T16:52:37,040 DEBUG [RS:0;e335232f7491:41569 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:52:37,040 INFO [RS:0;e335232f7491:41569 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T16:52:37,040 INFO [RS:0;e335232f7491:41569 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T16:52:37,040 INFO [RS:0;e335232f7491:41569 {}] hbase.ChoreService(370): Chore service for: regionserver/e335232f7491:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T16:52:37,040 INFO [RS:0;e335232f7491:41569 {}] 
hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T16:52:37,040 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T16:52:37,040 INFO [RS:0;e335232f7491:41569 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41569 2024-11-24T16:52:37,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T16:52:37,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e335232f7491,41569,1732467113673 2024-11-24T16:52:37,044 INFO [RS:0;e335232f7491:41569 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T16:52:37,045 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e335232f7491,41569,1732467113673] 2024-11-24T16:52:37,046 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e335232f7491,41569,1732467113673 already deleted, retry=false 2024-11-24T16:52:37,046 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e335232f7491,41569,1732467113673 expired; onlineServers=0 2024-11-24T16:52:37,046 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e335232f7491,43593,1732467113624' ***** 2024-11-24T16:52:37,046 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T16:52:37,046 INFO [M:0;e335232f7491:43593 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T16:52:37,047 INFO [M:0;e335232f7491:43593 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T16:52:37,047 DEBUG [M:0;e335232f7491:43593 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T16:52:37,047 DEBUG [M:0;e335232f7491:43593 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T16:52:37,047 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T16:52:37,047 DEBUG [master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467113872 {}] cleaner.HFileCleaner(306): Exit Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467113872,5,FailOnTimeoutGroup] 2024-11-24T16:52:37,047 DEBUG [master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467113876 {}] cleaner.HFileCleaner(306): Exit Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467113876,5,FailOnTimeoutGroup] 2024-11-24T16:52:37,047 INFO [M:0;e335232f7491:43593 {}] hbase.ChoreService(370): Chore service for: master/e335232f7491:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T16:52:37,047 INFO [M:0;e335232f7491:43593 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T16:52:37,047 DEBUG [M:0;e335232f7491:43593 {}] master.HMaster(1795): Stopping service threads 2024-11-24T16:52:37,047 INFO [M:0;e335232f7491:43593 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T16:52:37,047 INFO [M:0;e335232f7491:43593 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T16:52:37,047 INFO [M:0;e335232f7491:43593 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T16:52:37,048 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T16:52:37,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T16:52:37,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:52:37,051 DEBUG [M:0;e335232f7491:43593 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-24T16:52:37,051 DEBUG [M:0;e335232f7491:43593 {}] master.ActiveMasterManager(353): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-24T16:52:37,052 INFO [M:0;e335232f7491:43593 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/.lastflushedseqids 2024-11-24T16:52:37,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741914_1100 (size=130) 2024-11-24T16:52:37,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44831 is added to blk_1073741914_1100 (size=130) 2024-11-24T16:52:37,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741829_1005 (size=34) 2024-11-24T16:52:37,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741827_1003 (size=196) 2024-11-24T16:52:37,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:52:37,146 INFO [RS:0;e335232f7491:41569 {}] 
hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T16:52:37,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1014ae429160001, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:52:37,146 INFO [RS:0;e335232f7491:41569 {}] regionserver.HRegionServer(1031): Exiting; stopping=e335232f7491,41569,1732467113673; zookeeper connection closed. 2024-11-24T16:52:37,146 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1558d468 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1558d468 2024-11-24T16:52:37,146 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-24T16:52:37,420 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T16:52:37,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:37,442 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:37,442 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:37,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:37,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:37,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:37,450 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:37,453 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:37,459 INFO [M:0;e335232f7491:43593 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T16:52:37,459 INFO [M:0;e335232f7491:43593 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T16:52:37,459 DEBUG [M:0;e335232f7491:43593 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T16:52:37,459 INFO [M:0;e335232f7491:43593 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:52:37,459 DEBUG [M:0;e335232f7491:43593 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T16:52:37,459 DEBUG [M:0;e335232f7491:43593 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T16:52:37,459 DEBUG [M:0;e335232f7491:43593 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:52:37,459 INFO [M:0;e335232f7491:43593 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-24T16:52:37,482 DEBUG [M:0;e335232f7491:43593 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/17b8de74c7a64449af4c54def37506cd is 82, key is hbase:meta,,1/info:regioninfo/1732467114518/Put/seqid=0 2024-11-24T16:52:37,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741915_1101 (size=5672) 2024-11-24T16:52:37,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44831 is added to blk_1073741915_1101 (size=5672) 2024-11-24T16:52:37,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:52:37,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:52:37,889 INFO [M:0;e335232f7491:43593 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/17b8de74c7a64449af4c54def37506cd 2024-11-24T16:52:37,910 DEBUG [M:0;e335232f7491:43593 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0900c0ebafd740d69b59a8eb936f865c is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732467115121/Put/seqid=0 2024-11-24T16:52:37,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741916_1102 (size=6256) 2024-11-24T16:52:37,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44831 is added to blk_1073741916_1102 (size=6256) 2024-11-24T16:52:37,915 INFO [M:0;e335232f7491:43593 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0900c0ebafd740d69b59a8eb936f865c 2024-11-24T16:52:37,920 INFO [M:0;e335232f7491:43593 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0900c0ebafd740d69b59a8eb936f865c 2024-11-24T16:52:37,935 DEBUG [M:0;e335232f7491:43593 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2990706fa0fa4c9b8810ea9ff22f1008 is 69, key is e335232f7491,41569,1732467113673/rs:state/1732467113920/Put/seqid=0 2024-11-24T16:52:37,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741917_1103 (size=5224) 2024-11-24T16:52:37,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44831 is added to blk_1073741917_1103 (size=5224) 2024-11-24T16:52:37,941 INFO [M:0;e335232f7491:43593 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2990706fa0fa4c9b8810ea9ff22f1008 2024-11-24T16:52:37,961 DEBUG [M:0;e335232f7491:43593 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/39d4a54a56eb476fbd284cbeffc518f5 is 52, key is load_balancer_on/state:d/1732467114610/Put/seqid=0 2024-11-24T16:52:37,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741918_1104 (size=5056) 2024-11-24T16:52:37,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44831 is added to blk_1073741918_1104 (size=5056) 2024-11-24T16:52:37,966 INFO [M:0;e335232f7491:43593 {}] regionserver.DefaultStoreFlusher(81): Flushed 
memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/39d4a54a56eb476fbd284cbeffc518f5 2024-11-24T16:52:37,971 DEBUG [M:0;e335232f7491:43593 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/17b8de74c7a64449af4c54def37506cd as hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/17b8de74c7a64449af4c54def37506cd 2024-11-24T16:52:37,976 INFO [M:0;e335232f7491:43593 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/17b8de74c7a64449af4c54def37506cd, entries=8, sequenceid=60, filesize=5.5 K 2024-11-24T16:52:37,977 DEBUG [M:0;e335232f7491:43593 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0900c0ebafd740d69b59a8eb936f865c as hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0900c0ebafd740d69b59a8eb936f865c 2024-11-24T16:52:37,982 INFO [M:0;e335232f7491:43593 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0900c0ebafd740d69b59a8eb936f865c 2024-11-24T16:52:37,982 INFO [M:0;e335232f7491:43593 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0900c0ebafd740d69b59a8eb936f865c, entries=6, sequenceid=60, filesize=6.1 K 2024-11-24T16:52:37,983 DEBUG [M:0;e335232f7491:43593 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2990706fa0fa4c9b8810ea9ff22f1008 as hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2990706fa0fa4c9b8810ea9ff22f1008 2024-11-24T16:52:37,987 INFO [M:0;e335232f7491:43593 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2990706fa0fa4c9b8810ea9ff22f1008, entries=2, sequenceid=60, filesize=5.1 K 2024-11-24T16:52:37,988 DEBUG [M:0;e335232f7491:43593 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/39d4a54a56eb476fbd284cbeffc518f5 as hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/39d4a54a56eb476fbd284cbeffc518f5 2024-11-24T16:52:37,992 INFO [M:0;e335232f7491:43593 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/39d4a54a56eb476fbd284cbeffc518f5, entries=1, sequenceid=60, filesize=4.9 K 2024-11-24T16:52:37,994 INFO [M:0;e335232f7491:43593 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 534ms, sequenceid=60, compaction requested=false 2024-11-24T16:52:37,995 INFO [M:0;e335232f7491:43593 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:52:37,995 DEBUG [M:0;e335232f7491:43593 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732467157459Disabling compacts and flushes for region at 1732467157459Disabling writes for close at 1732467157459Obtaining lock to block concurrent updates at 1732467157459Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732467157459Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1732467157460 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732467157461 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732467157461Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732467157481 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732467157481Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732467157895 (+414 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732467157909 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732467157909Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732467157920 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732467157935 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732467157935Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732467157946 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732467157960 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732467157960Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2caf315c: reopening flushed file at 1732467157971 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7378c06e: reopening flushed file at 1732467157976 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2757c7f9: reopening flushed file at 1732467157982 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15f63000: reopening flushed file at 1732467157987 (+5 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 534ms, sequenceid=60, compaction requested=false at 1732467157994 (+7 ms)Writing region close event to WAL at 1732467157995 (+1 ms)Closed at 1732467157995 2024-11-24T16:52:37,996 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:37,996 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:37,996 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:37,996 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-24T16:52:37,996 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:37,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43629 is added to blk_1073741890_1073 (size=1045) 2024-11-24T16:52:37,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44831 is added to blk_1073741890_1073 (size=1045) 2024-11-24T16:52:38,082 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3bcdf559 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-481434126-172.17.0.2-1732467112819:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:38531,null,null]) java.net.ConnectException: Call From e335232f7491/172.17.0.2 to localhost:44515 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-24T16:52:38,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:38,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:38,895 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/WALs/e335232f7491,43593,1732467113624/e335232f7491%2C43593%2C1732467113624.1732467113785 to hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/oldWALs/e335232f7491%2C43593%2C1732467113624.1732467113785 2024-11-24T16:52:38,898 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/MasterData/oldWALs/e335232f7491%2C43593%2C1732467113624.1732467113785 to hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/oldWALs/e335232f7491%2C43593%2C1732467113624.1732467113785$masterlocalwal$ 2024-11-24T16:52:38,898 INFO [M:0;e335232f7491:43593 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T16:52:38,898 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T16:52:38,898 INFO [M:0;e335232f7491:43593 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43593 2024-11-24T16:52:38,899 INFO [M:0;e335232f7491:43593 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T16:52:39,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:52:39,001 INFO [M:0;e335232f7491:43593 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T16:52:39,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43593-0x1014ae429160000, quorum=127.0.0.1:63785, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:52:39,003 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@402253c7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:39,003 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5dd0b56c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:52:39,003 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:52:39,004 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6dc86a20{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:52:39,004 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@719d00c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/hadoop.log.dir/,STOPPED} 2024-11-24T16:52:39,005 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T16:52:39,005 WARN [BP-481434126-172.17.0.2-1732467112819 heartbeating to localhost/127.0.0.1:37129 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:52:39,005 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:52:39,005 WARN [BP-481434126-172.17.0.2-1732467112819 heartbeating to localhost/127.0.0.1:37129 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-481434126-172.17.0.2-1732467112819 (Datanode Uuid 61c9693b-f575-4f3d-a37d-37fe8ba7ee69) service to localhost/127.0.0.1:37129 2024-11-24T16:52:39,005 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@434f4a22 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-481434126-172.17.0.2-1732467112819:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:38531,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:44515 , LocalHost:localPort e335232f7491/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-24T16:52:39,006 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@434f4a22 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-481434126-172.17.0.2-1732467112819:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:44831,null,null], DatanodeInfoWithStorage[127.0.0.1:38531,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-481434126-172.17.0.2-1732467112819 2024-11-24T16:52:39,006 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@434f4a22 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-481434126-172.17.0.2-1732467112819:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:38531,null,null]) java.io.IOException: No block pool offer service for bpid=BP-481434126-172.17.0.2-1732467112819 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:39,006 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@434f4a22 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-481434126-172.17.0.2-1732467112819:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:44831,null,null]) java.io.IOException: No block pool offer service for bpid=BP-481434126-172.17.0.2-1732467112819 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:39,006 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@434f4a22 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-481434126-172.17.0.2-1732467112819:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:38531,null,null], DatanodeInfoWithStorage[127.0.0.1:44831,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-481434126-172.17.0.2-1732467112819:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:38531,null,null], DatanodeInfoWithStorage[127.0.0.1:44831,null,null]] 2024-11-24T16:52:39,006 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data3/current/BP-481434126-172.17.0.2-1732467112819 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:39,006 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data4/current/BP-481434126-172.17.0.2-1732467112819 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:39,007 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:52:39,009 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39835cdd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:39,009 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@12b19204{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:52:39,009 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:52:39,009 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3afd309b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:52:39,009 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@712f5f14{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/hadoop.log.dir/,STOPPED} 2024-11-24T16:52:39,010 WARN [BP-481434126-172.17.0.2-1732467112819 heartbeating to localhost/127.0.0.1:37129 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:52:39,010 WARN [BP-481434126-172.17.0.2-1732467112819 heartbeating to localhost/127.0.0.1:37129 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-481434126-172.17.0.2-1732467112819 (Datanode Uuid fb451156-f481-4fd3-8d9d-eaa0f33405e6) service to localhost/127.0.0.1:37129 2024-11-24T16:52:39,010 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T16:52:39,011 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:52:39,011 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data7/current/BP-481434126-172.17.0.2-1732467112819 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:39,011 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/cluster_105d0222-84bd-39c5-3568-5f2c12cd3ad2/data/data8/current/BP-481434126-172.17.0.2-1732467112819 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:39,011 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:52:39,017 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7cd2a640{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T16:52:39,018 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64b7b556{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:52:39,018 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:52:39,018 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ed3a961{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:52:39,018 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a488aac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/hadoop.log.dir/,STOPPED} 2024-11-24T16:52:39,026 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T16:52:39,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T16:52:39,064 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 78) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42105 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37129 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37129 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:37129 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37129 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37129 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37129 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37129 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-5-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Close-WAL-Writer-0
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007ff900bef800.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-17-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ForkJoinPool-2-worker-5
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-20-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37129 from jenkins.hfs.2
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-17-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-16-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:42105
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37129 from jenkins.hfs.3
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:37129
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-16-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37129
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-20-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-17-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-21-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Close-WAL-Writer-0
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007ff900bef800.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=450 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=162 (was 75) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5925 (was 6959)
2024-11-24T16:52:39,071 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=162, ProcessCount=11, AvailableMemoryMB=5925
2024-11-24T16:52:39,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-24T16:52:39,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/hadoop.log.dir so I do NOT create it in target/test-data/9778ece6-5dba-f683-f85d-794c2439525c
2024-11-24T16:52:39,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/095100c2-5612-8080-34f5-cdbcd34e2dca/hadoop.tmp.dir so I do NOT create it in target/test-data/9778ece6-5dba-f683-f85d-794c2439525c
2024-11-24T16:52:39,071 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c, deleteOnExit=true
2024-11-24T16:52:39,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-24T16:52:39,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/test.cache.data in system properties and HBase conf
2024-11-24T16:52:39,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/hadoop.tmp.dir in system properties and HBase conf
2024-11-24T16:52:39,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/hadoop.log.dir in system properties and HBase conf
2024-11-24T16:52:39,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-24T16:52:39,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T16:52:39,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T16:52:39,072 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-24T16:52:39,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T16:52:39,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T16:52:39,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T16:52:39,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T16:52:39,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T16:52:39,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T16:52:39,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T16:52:39,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T16:52:39,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/dfs.datanode.shared.file.descriptor.paths in 
system properties and HBase conf 2024-11-24T16:52:39,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/nfs.dump.dir in system properties and HBase conf 2024-11-24T16:52:39,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/java.io.tmpdir in system properties and HBase conf 2024-11-24T16:52:39,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T16:52:39,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T16:52:39,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T16:52:39,087 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T16:52:39,167 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:52:39,173 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:52:39,175 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:52:39,175 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:52:39,175 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:52:39,176 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:52:39,176 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@affd6eb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:52:39,177 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65e5bfc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:52:39,292 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e54696a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/java.io.tmpdir/jetty-localhost-46825-hadoop-hdfs-3_4_1-tests_jar-_-any-17971652566955513390/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T16:52:39,293 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@228e200c{HTTP/1.1, (http/1.1)}{localhost:46825} 2024-11-24T16:52:39,293 INFO [Time-limited test {}] server.Server(415): Started @150255ms 2024-11-24T16:52:39,306 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T16:52:39,377 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:52:39,380 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:52:39,381 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:52:39,381 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:52:39,381 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:52:39,381 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371f8296{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:52:39,382 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5493c38f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:52:39,497 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e4cbcc2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/java.io.tmpdir/jetty-localhost-43015-hadoop-hdfs-3_4_1-tests_jar-_-any-7851419112190981738/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:39,497 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3fab170c{HTTP/1.1, (http/1.1)}{localhost:43015} 2024-11-24T16:52:39,498 INFO [Time-limited test {}] server.Server(415): Started @150460ms 2024-11-24T16:52:39,499 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T16:52:39,528 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:52:39,532 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:52:39,533 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:52:39,533 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:52:39,533 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:52:39,534 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@771a779f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:52:39,534 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50fbcccf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:52:39,624 WARN [Thread-1190 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/data/data1/current/BP-1845562487-172.17.0.2-1732467159106/current, will proceed with Du for space computation calculation, 2024-11-24T16:52:39,624 WARN [Thread-1191 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/data/data2/current/BP-1845562487-172.17.0.2-1732467159106/current, will proceed with Du for space computation calculation, 2024-11-24T16:52:39,641 WARN [Thread-1169 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T16:52:39,644 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4a9144e4f8e88c80 with lease ID 0x81286d3bcb2b095f: Processing first storage report for DS-74588b0d-9974-4d3a-81c5-3b5ee0677613 from datanode DatanodeRegistration(127.0.0.1:37059, datanodeUuid=646f4b75-dde7-4682-99de-65bd4c48f64b, infoPort=40337, infoSecurePort=0, ipcPort=38709, storageInfo=lv=-57;cid=testClusterID;nsid=2089609732;c=1732467159106) 2024-11-24T16:52:39,644 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4a9144e4f8e88c80 with lease ID 0x81286d3bcb2b095f: from storage DS-74588b0d-9974-4d3a-81c5-3b5ee0677613 node DatanodeRegistration(127.0.0.1:37059, datanodeUuid=646f4b75-dde7-4682-99de-65bd4c48f64b, infoPort=40337, infoSecurePort=0, ipcPort=38709, storageInfo=lv=-57;cid=testClusterID;nsid=2089609732;c=1732467159106), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:39,644 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4a9144e4f8e88c80 with lease ID 0x81286d3bcb2b095f: Processing first storage report for DS-1eb35a8a-9ccb-4250-bb64-52aa2fa3e186 from datanode DatanodeRegistration(127.0.0.1:37059, datanodeUuid=646f4b75-dde7-4682-99de-65bd4c48f64b, infoPort=40337, infoSecurePort=0, ipcPort=38709, storageInfo=lv=-57;cid=testClusterID;nsid=2089609732;c=1732467159106) 2024-11-24T16:52:39,644 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4a9144e4f8e88c80 with lease ID 0x81286d3bcb2b095f: from storage DS-1eb35a8a-9ccb-4250-bb64-52aa2fa3e186 node DatanodeRegistration(127.0.0.1:37059, datanodeUuid=646f4b75-dde7-4682-99de-65bd4c48f64b, infoPort=40337, infoSecurePort=0, ipcPort=38709, storageInfo=lv=-57;cid=testClusterID;nsid=2089609732;c=1732467159106), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:39,655 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e8ebafe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/java.io.tmpdir/jetty-localhost-43895-hadoop-hdfs-3_4_1-tests_jar-_-any-14233278988991689210/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:39,656 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2835f29c{HTTP/1.1, (http/1.1)}{localhost:43895} 2024-11-24T16:52:39,656 INFO [Time-limited test {}] server.Server(415): Started @150618ms 2024-11-24T16:52:39,657 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T16:52:39,751 WARN [Thread-1216 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/data/data3/current/BP-1845562487-172.17.0.2-1732467159106/current, will proceed with Du for space computation calculation, 2024-11-24T16:52:39,751 WARN [Thread-1217 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/data/data4/current/BP-1845562487-172.17.0.2-1732467159106/current, will proceed with Du for space computation calculation, 2024-11-24T16:52:39,767 WARN [Thread-1205 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T16:52:39,769 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x676ac1b7903b98cc with lease ID 0x81286d3bcb2b0960: Processing first storage report for DS-a709033d-6a66-4826-be05-ebe8eec77087 from datanode DatanodeRegistration(127.0.0.1:45387, datanodeUuid=fc58d58b-ec27-4140-8039-694920e754c0, infoPort=40447, infoSecurePort=0, ipcPort=36753, storageInfo=lv=-57;cid=testClusterID;nsid=2089609732;c=1732467159106) 2024-11-24T16:52:39,769 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x676ac1b7903b98cc with lease ID 0x81286d3bcb2b0960: from storage DS-a709033d-6a66-4826-be05-ebe8eec77087 node DatanodeRegistration(127.0.0.1:45387, datanodeUuid=fc58d58b-ec27-4140-8039-694920e754c0, infoPort=40447, infoSecurePort=0, ipcPort=36753, storageInfo=lv=-57;cid=testClusterID;nsid=2089609732;c=1732467159106), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:39,769 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x676ac1b7903b98cc with lease ID 0x81286d3bcb2b0960: Processing first storage report for DS-02df4fdf-f444-4522-9ed8-da8f591567c0 from datanode DatanodeRegistration(127.0.0.1:45387, datanodeUuid=fc58d58b-ec27-4140-8039-694920e754c0, infoPort=40447, infoSecurePort=0, ipcPort=36753, storageInfo=lv=-57;cid=testClusterID;nsid=2089609732;c=1732467159106) 2024-11-24T16:52:39,769 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x676ac1b7903b98cc with lease ID 0x81286d3bcb2b0960: from storage DS-02df4fdf-f444-4522-9ed8-da8f591567c0 node DatanodeRegistration(127.0.0.1:45387, datanodeUuid=fc58d58b-ec27-4140-8039-694920e754c0, infoPort=40447, infoSecurePort=0, ipcPort=36753, storageInfo=lv=-57;cid=testClusterID;nsid=2089609732;c=1732467159106), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:39,782 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c 2024-11-24T16:52:39,785 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/zookeeper_0, clientPort=62232, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T16:52:39,786 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62232 2024-11-24T16:52:39,786 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:52:39,788 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:52:39,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37059 is added to blk_1073741825_1001 (size=7) 2024-11-24T16:52:39,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741825_1001 (size=7) 2024-11-24T16:52:39,797 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef with version=8 2024-11-24T16:52:39,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/hbase-staging 2024-11-24T16:52:39,799 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e335232f7491:0 server-side Connection retries=45 2024-11-24T16:52:39,799 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:52:39,800 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T16:52:39,800 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T16:52:39,800 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:52:39,800 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T16:52:39,800 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T16:52:39,800 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-24T16:52:39,801 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34461
2024-11-24T16:52:39,802 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34461 connecting to ZooKeeper ensemble=127.0.0.1:62232
2024-11-24T16:52:39,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:344610x0, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-24T16:52:39,813 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34461-0x1014ae4dd780000 connected
2024-11-24T16:52:39,826 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T16:52:39,828 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T16:52:39,830 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-24T16:52:39,830 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef, hbase.cluster.distributed=false
2024-11-24T16:52:39,831 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-24T16:52:39,832 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34461
2024-11-24T16:52:39,832 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34461
2024-11-24T16:52:39,832 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34461
2024-11-24T16:52:39,833 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34461
2024-11-24T16:52:39,833 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34461
2024-11-24T16:52:39,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T16:52:39,848 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e335232f7491:0 server-side Connection retries=45
2024-11-24T16:52:39,848 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-24T16:52:39,848 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-24T16:52:39,848 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-24T16:52:39,848 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-24T16:52:39,848 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-24T16:52:39,848 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-24T16:52:39,849 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-24T16:52:39,849 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39941
2024-11-24T16:52:39,850 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39941 connecting to ZooKeeper ensemble=127.0.0.1:62232
2024-11-24T16:52:39,851 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T16:52:39,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T16:52:39,853 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T16:52:39,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:399410x0, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-24T16:52:39,861 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-24T16:52:39,861 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39941-0x1014ae4dd780001 connected
2024-11-24T16:52:39,861 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-24T16:52:39,861 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-24T16:52:39,862 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-24T16:52:39,863 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-24T16:52:39,863 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39941
2024-11-24T16:52:39,863 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39941
2024-11-24T16:52:39,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39941
2024-11-24T16:52:39,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39941
2024-11-24T16:52:39,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39941
2024-11-24T16:52:39,876 DEBUG [M:0;e335232f7491:34461 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e335232f7491:34461
2024-11-24T16:52:39,876 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e335232f7491,34461,1732467159799
2024-11-24T16:52:39,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-24T16:52:39,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-24T16:52:39,878 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e335232f7491,34461,1732467159799
2024-11-24T16:52:39,880 DEBUG [Time-limited test-EventThread {}]
zookeeper.ZKWatcher(609): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T16:52:39,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:52:39,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:52:39,880 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T16:52:39,881 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e335232f7491,34461,1732467159799 from backup master directory 2024-11-24T16:52:39,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e335232f7491,34461,1732467159799 2024-11-24T16:52:39,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:52:39,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:52:39,882 WARN [master/e335232f7491:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T16:52:39,882 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e335232f7491,34461,1732467159799 2024-11-24T16:52:39,886 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/hbase.id] with ID: 66af43bc-52af-4c06-9a3a-cdf76e5f138b 2024-11-24T16:52:39,886 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/.tmp/hbase.id 2024-11-24T16:52:39,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741826_1002 (size=42) 2024-11-24T16:52:39,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37059 is added to blk_1073741826_1002 (size=42) 2024-11-24T16:52:39,893 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/.tmp/hbase.id]:[hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/hbase.id] 2024-11-24T16:52:39,905 INFO [master/e335232f7491:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:52:39,905 INFO [master/e335232f7491:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T16:52:39,906 INFO [master/e335232f7491:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-24T16:52:39,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:52:39,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:52:39,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741827_1003 (size=196) 2024-11-24T16:52:39,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37059 is added to blk_1073741827_1003 (size=196) 2024-11-24T16:52:39,916 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T16:52:39,917 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T16:52:39,917 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:52:39,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741828_1004 (size=1189) 2024-11-24T16:52:39,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37059 is added to blk_1073741828_1004 (size=1189) 2024-11-24T16:52:39,925 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store 2024-11-24T16:52:39,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741829_1005 (size=34) 2024-11-24T16:52:39,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37059 is added to blk_1073741829_1005 (size=34) 2024-11-24T16:52:39,931 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:52:39,931 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T16:52:39,931 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:52:39,931 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:52:39,931 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T16:52:39,931 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:52:39,931 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
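
[annotation] The master:store descriptor logged above spells out each column family's settings (versions, bloom filter type, data block encoding, block size, in-memory flag). As a hedged sketch, two of those families could be expressed with the public HBase 2.x builder API as below; the master's local store region is created internally rather than through a client, so this is only meant to make the logged attributes concrete.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        // 'info' family as logged: 3 versions, ROWCOL bloom, ROW_INDEX_V1 encoding, 8 KB blocks, in-memory.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();
        // 'proc' family as logged: 1 version, ROW bloom, no encoding, 64 KB blocks.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
      }
    }
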
2024-11-24T16:52:39,931 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732467159931Disabling compacts and flushes for region at 1732467159931Disabling writes for close at 1732467159931Writing region close event to WAL at 1732467159931Closed at 1732467159931 2024-11-24T16:52:39,932 WARN [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/.initializing 2024-11-24T16:52:39,933 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/WALs/e335232f7491,34461,1732467159799 2024-11-24T16:52:39,935 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C34461%2C1732467159799, suffix=, logDir=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/WALs/e335232f7491,34461,1732467159799, archiveDir=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/oldWALs, maxLogs=10 2024-11-24T16:52:39,936 INFO [master/e335232f7491:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C34461%2C1732467159799.1732467159936 2024-11-24T16:52:39,940 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/WALs/e335232f7491,34461,1732467159799/e335232f7491%2C34461%2C1732467159799.1732467159936 2024-11-24T16:52:39,941 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40447:40447),(127.0.0.1/127.0.0.1:40337:40337)] 2024-11-24T16:52:39,944 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:52:39,944 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:52:39,944 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:52:39,945 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:52:39,946 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:52:39,947 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T16:52:39,947 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:52:39,948 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:52:39,948 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:52:39,949 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T16:52:39,949 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:52:39,950 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:52:39,950 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:52:39,951 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T16:52:39,951 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:52:39,952 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:52:39,952 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:52:39,953 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T16:52:39,953 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:52:39,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:52:39,954 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:52:39,954 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:52:39,955 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:52:39,956 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:52:39,956 DEBUG [master/e335232f7491:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:52:39,956 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T16:52:39,958 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:52:39,960 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:52:39,960 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778499, jitterRate=-0.010087639093399048}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T16:52:39,961 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732467159945Initializing all the Stores at 1732467159945Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467159945Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467159946 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467159946Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467159946Cleaning up temporary data from old regions at 1732467159956 (+10 ms)Region opened successfully at 1732467159961 (+5 ms) 2024-11-24T16:52:39,961 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T16:52:39,964 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36096676, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e335232f7491/172.17.0.2:0 2024-11-24T16:52:39,965 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T16:52:39,965 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T16:52:39,965 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T16:52:39,965 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T16:52:39,966 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T16:52:39,966 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T16:52:39,966 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T16:52:39,968 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T16:52:39,969 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T16:52:39,970 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T16:52:39,970 INFO [master/e335232f7491:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T16:52:39,971 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T16:52:39,972 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T16:52:39,972 INFO [master/e335232f7491:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T16:52:39,973 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T16:52:39,976 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T16:52:39,977 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T16:52:39,981 DEBUG 
[master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T16:52:39,983 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T16:52:39,984 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T16:52:39,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T16:52:39,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T16:52:39,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:52:39,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:52:39,986 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e335232f7491,34461,1732467159799, sessionid=0x1014ae4dd780000, setting cluster-up flag (Was=false) 2024-11-24T16:52:39,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:52:39,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:52:39,994 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T16:52:39,995 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e335232f7491,34461,1732467159799 2024-11-24T16:52:40,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:52:40,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:52:40,006 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T16:52:40,007 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e335232f7491,34461,1732467159799 2024-11-24T16:52:40,008 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T16:52:40,010 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T16:52:40,010 INFO [master/e335232f7491:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T16:52:40,010 INFO [master/e335232f7491:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T16:52:40,011 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e335232f7491,34461,1732467159799 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T16:52:40,012 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:52:40,012 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:52:40,012 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:52:40,012 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:52:40,012 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e335232f7491:0, corePoolSize=10, maxPoolSize=10 2024-11-24T16:52:40,012 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:52:40,012 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e335232f7491:0, corePoolSize=2, maxPoolSize=2 2024-11-24T16:52:40,012 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e335232f7491:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T16:52:40,014 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:52:40,014 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T16:52:40,014 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732467190014 2024-11-24T16:52:40,015 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T16:52:40,015 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T16:52:40,015 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T16:52:40,015 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T16:52:40,015 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T16:52:40,015 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T16:52:40,015 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
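
[annotation] The ChoreService lines above (LogsCleaner with period=600000, unit=MILLISECONDS, followed by the other cleaner chores) all use the same scheduling mechanism. A rough sketch of defining and scheduling a chore is below; the class ExampleCleanerChore is hypothetical, and the constructor arguments and default time unit reflect my reading of the ScheduledChore/ChoreService API plus the log lines, not code taken from this test.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      // Hypothetical chore; the real log/HFile cleaners walk the oldWALs/archive directories.
      static class ExampleCleanerChore extends ScheduledChore {
        ExampleCleanerChore(Stoppable stopper) {
          super("ExampleCleaner", stopper, 600_000); // name, stopper, period (milliseconds, per the log above)
        }
        @Override
        protected void chore() {
          System.out.println("periodic cleanup would run here");
        }
      }

      public static void main(String[] args) {
        // Trivial Stoppable so the chore has something to check against.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService choreService = new ChoreService("example");
        choreService.scheduleChore(new ExampleCleanerChore(stopper)); // logs "Chore ... is enabled."
        choreService.shutdown();
      }
    }
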
2024-11-24T16:52:40,015 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:52:40,015 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T16:52:40,016 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T16:52:40,016 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T16:52:40,015 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T16:52:40,016 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T16:52:40,016 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T16:52:40,016 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467160016,5,FailOnTimeoutGroup] 2024-11-24T16:52:40,019 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467160016,5,FailOnTimeoutGroup] 2024-11-24T16:52:40,019 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T16:52:40,019 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T16:52:40,019 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T16:52:40,019 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T16:52:40,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741831_1007 (size=1321) 2024-11-24T16:52:40,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37059 is added to blk_1073741831_1007 (size=1321) 2024-11-24T16:52:40,028 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T16:52:40,028 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef 2024-11-24T16:52:40,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741832_1008 (size=32) 2024-11-24T16:52:40,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37059 is added to blk_1073741832_1008 (size=32) 2024-11-24T16:52:40,037 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:52:40,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T16:52:40,042 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T16:52:40,042 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:52:40,043 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:52:40,043 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T16:52:40,044 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T16:52:40,044 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:52:40,044 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:52:40,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T16:52:40,046 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T16:52:40,046 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:52:40,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:52:40,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T16:52:40,047 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T16:52:40,047 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:52:40,048 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:52:40,048 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T16:52:40,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740 2024-11-24T16:52:40,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740 2024-11-24T16:52:40,050 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T16:52:40,050 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T16:52:40,051 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T16:52:40,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T16:52:40,055 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:52:40,055 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=704596, jitterRate=-0.10405990481376648}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T16:52:40,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732467160037Initializing all the Stores at 1732467160038 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467160038Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467160040 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467160040Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467160040Cleaning up temporary data from old regions at 1732467160050 (+10 ms)Region opened successfully at 1732467160056 (+6 ms) 2024-11-24T16:52:40,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T16:52:40,056 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T16:52:40,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T16:52:40,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T16:52:40,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T16:52:40,057 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T16:52:40,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732467160056Disabling compacts and flushes for region at 
1732467160056Disabling writes for close at 1732467160056Writing region close event to WAL at 1732467160056Closed at 1732467160056 2024-11-24T16:52:40,058 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:52:40,058 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T16:52:40,058 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T16:52:40,059 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T16:52:40,061 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T16:52:40,066 INFO [RS:0;e335232f7491:39941 {}] regionserver.HRegionServer(746): ClusterId : 66af43bc-52af-4c06-9a3a-cdf76e5f138b 2024-11-24T16:52:40,066 DEBUG [RS:0;e335232f7491:39941 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T16:52:40,069 DEBUG [RS:0;e335232f7491:39941 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T16:52:40,069 DEBUG [RS:0;e335232f7491:39941 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T16:52:40,071 DEBUG [RS:0;e335232f7491:39941 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T16:52:40,071 DEBUG [RS:0;e335232f7491:39941 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@423a9e66, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e335232f7491/172.17.0.2:0 2024-11-24T16:52:40,089 DEBUG [RS:0;e335232f7491:39941 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e335232f7491:39941 2024-11-24T16:52:40,089 INFO [RS:0;e335232f7491:39941 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T16:52:40,089 INFO [RS:0;e335232f7491:39941 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T16:52:40,089 DEBUG [RS:0;e335232f7491:39941 {}] regionserver.HRegionServer(832): About to register with Master. 
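
[annotation] The split-policy numbers in the two region-open lines above (desiredMaxFileSize=778499 with jitterRate=-0.0100876... for master:store, and desiredMaxFileSize=704596 with jitterRate=-0.1040599... for hbase:meta) are both consistent with a configured maximum file size of 786,432 bytes (768 KB) plus a truncated jitter term; the FlushLargeStoresPolicy lower bounds are simply the memstore flush size divided by the four column families (128 MB / 4 = 33554432, and the logged 16.0 M per family implies a 64 MB flush size for meta). The base size and the truncation rule are inferred from the logged values, not read from any config shown here. A tiny arithmetic check under those assumptions:

    public class SplitAndFlushArithmetic {
      public static void main(String[] args) {
        // Assumed base value, inferred from the two logged results.
        long configuredMaxFileSize = 786_432L;

        double masterStoreJitter = -0.010087639093399048; // from the master:store open log line
        double metaJitter        = -0.10405990481376648;  // from the hbase:meta open log line

        // desiredMaxFileSize = configured size + truncated(size * jitterRate)
        System.out.println(configuredMaxFileSize + (long) (configuredMaxFileSize * masterStoreJitter)); // 778499
        System.out.println(configuredMaxFileSize + (long) (configuredMaxFileSize * metaJitter));        // 704596

        // FlushLargeStoresPolicy lower bound = memstore flush size / number of column families
        System.out.println(134_217_728L / 4); // 33554432 (32 MB) for master:store
        System.out.println( 67_108_864L / 4); // 16777216 (16 MB) for hbase:meta
      }
    }
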
2024-11-24T16:52:40,090 INFO [RS:0;e335232f7491:39941 {}] regionserver.HRegionServer(2659): reportForDuty to master=e335232f7491,34461,1732467159799 with port=39941, startcode=1732467159848 2024-11-24T16:52:40,090 DEBUG [RS:0;e335232f7491:39941 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T16:52:40,093 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34707, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T16:52:40,093 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34461 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e335232f7491,39941,1732467159848 2024-11-24T16:52:40,093 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34461 {}] master.ServerManager(517): Registering regionserver=e335232f7491,39941,1732467159848 2024-11-24T16:52:40,095 DEBUG [RS:0;e335232f7491:39941 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef 2024-11-24T16:52:40,095 DEBUG [RS:0;e335232f7491:39941 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34209 2024-11-24T16:52:40,095 DEBUG [RS:0;e335232f7491:39941 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T16:52:40,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T16:52:40,097 DEBUG [RS:0;e335232f7491:39941 {}] zookeeper.ZKUtil(111): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e335232f7491,39941,1732467159848 2024-11-24T16:52:40,097 WARN [RS:0;e335232f7491:39941 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T16:52:40,097 INFO [RS:0;e335232f7491:39941 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:52:40,097 DEBUG [RS:0;e335232f7491:39941 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848 2024-11-24T16:52:40,097 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e335232f7491,39941,1732467159848] 2024-11-24T16:52:40,101 INFO [RS:0;e335232f7491:39941 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T16:52:40,104 INFO [RS:0;e335232f7491:39941 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T16:52:40,105 INFO [RS:0;e335232f7491:39941 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T16:52:40,105 INFO [RS:0;e335232f7491:39941 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
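
[annotation] The ZKWatcher/ZKUtil lines above show the processes re-arming watches on znodes such as /hbase/rs/<server> and logging the resulting NodeCreated/NodeChildrenChanged events. A bare-bones sketch of the same idea using the plain ZooKeeper client API (connect string taken from the quorum logged above; not HBase's own ZKWatcher class):

    import org.apache.zookeeper.ZooKeeper;

    public class ZkWatchSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62232", 30_000, event ->
            // Mirrors the "Received ZooKeeper Event, type=..., state=..., path=..." lines above.
            System.out.println("event type=" + event.getType()
                + " state=" + event.getState() + " path=" + event.getPath()));

        // exists() with watch=true sets a watch on the znode, similar in spirit to ZKUtil's
        // "Set watcher on existing znode=/hbase/rs/..." line above.
        zk.exists("/hbase/rs", true);
        zk.close();
      }
    }
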
2024-11-24T16:52:40,105 INFO [RS:0;e335232f7491:39941 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T16:52:40,106 INFO [RS:0;e335232f7491:39941 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T16:52:40,106 INFO [RS:0;e335232f7491:39941 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T16:52:40,106 DEBUG [RS:0;e335232f7491:39941 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:52:40,106 DEBUG [RS:0;e335232f7491:39941 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:52:40,106 DEBUG [RS:0;e335232f7491:39941 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:52:40,106 DEBUG [RS:0;e335232f7491:39941 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:52:40,106 DEBUG [RS:0;e335232f7491:39941 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:52:40,106 DEBUG [RS:0;e335232f7491:39941 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e335232f7491:0, corePoolSize=2, maxPoolSize=2 2024-11-24T16:52:40,106 DEBUG [RS:0;e335232f7491:39941 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:52:40,106 DEBUG [RS:0;e335232f7491:39941 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:52:40,106 DEBUG [RS:0;e335232f7491:39941 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:52:40,106 DEBUG [RS:0;e335232f7491:39941 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:52:40,106 DEBUG [RS:0;e335232f7491:39941 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:52:40,106 DEBUG [RS:0;e335232f7491:39941 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:52:40,106 DEBUG [RS:0;e335232f7491:39941 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e335232f7491:0, corePoolSize=3, maxPoolSize=3 2024-11-24T16:52:40,106 DEBUG [RS:0;e335232f7491:39941 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0, corePoolSize=3, maxPoolSize=3 2024-11-24T16:52:40,107 INFO [RS:0;e335232f7491:39941 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
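
[annotation] The executor services listed above are thread pools with fixed core/max sizes (e.g. corePoolSize=3, maxPoolSize=3 for RS_SNAPSHOT_OPERATIONS). In plain JDK terms, setting aside HBase's own ExecutorService wrapper, that configuration amounts to roughly the following:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExecutorPoolSketch {
      public static void main(String[] args) {
        // corePoolSize=3, maxPoolSize=3: a fixed-size pool of three worker threads.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            3, 3,                          // core and maximum pool size
            60, TimeUnit.SECONDS,          // keep-alive for idle threads
            new LinkedBlockingQueue<>());  // unbounded work queue

        pool.allowCoreThreadTimeOut(true); // let idle core threads exit, as some of the pools above do
        pool.execute(() -> System.out.println("task runs on a pool thread"));
        pool.shutdown();
      }
    }
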
2024-11-24T16:52:40,107 INFO [RS:0;e335232f7491:39941 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T16:52:40,107 INFO [RS:0;e335232f7491:39941 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:52:40,107 INFO [RS:0;e335232f7491:39941 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T16:52:40,107 INFO [RS:0;e335232f7491:39941 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T16:52:40,107 INFO [RS:0;e335232f7491:39941 {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,39941,1732467159848-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T16:52:40,123 INFO [RS:0;e335232f7491:39941 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T16:52:40,123 INFO [RS:0;e335232f7491:39941 {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,39941,1732467159848-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:52:40,123 INFO [RS:0;e335232f7491:39941 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:52:40,123 INFO [RS:0;e335232f7491:39941 {}] regionserver.Replication(171): e335232f7491,39941,1732467159848 started 2024-11-24T16:52:40,137 INFO [RS:0;e335232f7491:39941 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:52:40,137 INFO [RS:0;e335232f7491:39941 {}] regionserver.HRegionServer(1482): Serving as e335232f7491,39941,1732467159848, RpcServer on e335232f7491/172.17.0.2:39941, sessionid=0x1014ae4dd780001 2024-11-24T16:52:40,138 DEBUG [RS:0;e335232f7491:39941 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T16:52:40,138 DEBUG [RS:0;e335232f7491:39941 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e335232f7491,39941,1732467159848 2024-11-24T16:52:40,138 DEBUG [RS:0;e335232f7491:39941 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e335232f7491,39941,1732467159848' 2024-11-24T16:52:40,138 DEBUG [RS:0;e335232f7491:39941 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T16:52:40,138 DEBUG [RS:0;e335232f7491:39941 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T16:52:40,139 DEBUG [RS:0;e335232f7491:39941 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T16:52:40,139 DEBUG [RS:0;e335232f7491:39941 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T16:52:40,139 DEBUG [RS:0;e335232f7491:39941 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e335232f7491,39941,1732467159848 2024-11-24T16:52:40,139 DEBUG [RS:0;e335232f7491:39941 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e335232f7491,39941,1732467159848' 2024-11-24T16:52:40,139 DEBUG [RS:0;e335232f7491:39941 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T16:52:40,139 DEBUG 
[RS:0;e335232f7491:39941 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T16:52:40,139 DEBUG [RS:0;e335232f7491:39941 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T16:52:40,140 INFO [RS:0;e335232f7491:39941 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T16:52:40,140 INFO [RS:0;e335232f7491:39941 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T16:52:40,211 WARN [e335232f7491:34461 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T16:52:40,242 INFO [RS:0;e335232f7491:39941 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C39941%2C1732467159848, suffix=, logDir=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848, archiveDir=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/oldWALs, maxLogs=32 2024-11-24T16:52:40,243 INFO [RS:0;e335232f7491:39941 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C39941%2C1732467159848.1732467160242 2024-11-24T16:52:40,249 INFO [RS:0;e335232f7491:39941 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467160242 2024-11-24T16:52:40,251 DEBUG [RS:0;e335232f7491:39941 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40447:40447),(127.0.0.1/127.0.0.1:40337:40337)] 2024-11-24T16:52:40,461 DEBUG [e335232f7491:34461 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T16:52:40,462 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e335232f7491,39941,1732467159848 2024-11-24T16:52:40,463 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e335232f7491,39941,1732467159848, state=OPENING 2024-11-24T16:52:40,466 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T16:52:40,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:52:40,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:52:40,468 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T16:52:40,468 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:52:40,468 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:52:40,468 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e335232f7491,39941,1732467159848}] 2024-11-24T16:52:40,622 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T16:52:40,624 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36011, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T16:52:40,627 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T16:52:40,628 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:52:40,629 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C39941%2C1732467159848.meta, suffix=.meta, logDir=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848, archiveDir=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/oldWALs, maxLogs=32 2024-11-24T16:52:40,630 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C39941%2C1732467159848.meta.1732467160630.meta 2024-11-24T16:52:40,635 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.meta.1732467160630.meta 2024-11-24T16:52:40,636 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40337:40337),(127.0.0.1/127.0.0.1:40447:40447)] 2024-11-24T16:52:40,637 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:52:40,637 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T16:52:40,638 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T16:52:40,638 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
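The AbstractFSWAL entries above describe the WAL layout for this server: blocksize=256 MB, rollsize=128 MB, maxLogs=32, with FSHLogProvider as the provider; the roll size is the block size scaled by a multiplier (0.5 here). The sketch below sets the corresponding standard configuration keys. It is not taken from the test, and the exact defaults should be treated as assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.wal.provider", "filesystem");           // FSHLogProvider, as in the log
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f); // 256 MB * 0.5 = 128 MB rollsize
            conf.setInt("hbase.regionserver.maxlogs", 32);
            // A roll is requested once the current WAL file exceeds blocksize * multiplier.
        }
    }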
2024-11-24T16:52:40,638 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T16:52:40,638 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:52:40,638 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T16:52:40,638 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T16:52:40,639 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T16:52:40,640 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T16:52:40,640 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:52:40,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:52:40,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T16:52:40,641 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T16:52:40,641 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:52:40,642 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:52:40,642 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T16:52:40,643 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T16:52:40,643 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:52:40,643 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:52:40,643 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T16:52:40,644 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T16:52:40,644 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:52:40,644 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
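The stores opened above for hbase:meta all use ROW_INDEX_V1 block encoding, and the region-open journal below spells out the full column-family settings (ROWCOL bloom filter, IN_MEMORY true, 8 KB block size for info). For a user table, equivalent settings would be declared on the column family descriptor. The sketch below uses the public builder API as an illustration; it is not the meta table's bootstrap code.

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilySettings {
        static ColumnFamilyDescriptor infoFamily() {
            return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // as reported by HStore above
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8192)
                .build();
        }
    }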
2024-11-24T16:52:40,644 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T16:52:40,645 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740 2024-11-24T16:52:40,646 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740 2024-11-24T16:52:40,647 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T16:52:40,647 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T16:52:40,648 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T16:52:40,649 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T16:52:40,650 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=840026, jitterRate=0.06814847886562347}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T16:52:40,650 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T16:52:40,651 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732467160638Writing region info on filesystem at 1732467160638Initializing all the Stores at 1732467160639 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467160639Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467160639Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467160639Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467160639Cleaning up temporary data from old regions at 1732467160647 (+8 ms)Running coprocessor post-open hooks at 1732467160650 (+3 ms)Region opened successfully at 1732467160651 (+1 ms) 2024-11-24T16:52:40,652 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732467160621 2024-11-24T16:52:40,654 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T16:52:40,655 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T16:52:40,655 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e335232f7491,39941,1732467159848 2024-11-24T16:52:40,656 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e335232f7491,39941,1732467159848, state=OPEN 2024-11-24T16:52:40,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T16:52:40,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T16:52:40,661 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:52:40,661 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:52:40,661 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e335232f7491,39941,1732467159848 2024-11-24T16:52:40,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T16:52:40,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e335232f7491,39941,1732467159848 in 193 msec 2024-11-24T16:52:40,667 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T16:52:40,667 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 606 msec 2024-11-24T16:52:40,668 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:52:40,668 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T16:52:40,669 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T16:52:40,669 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e335232f7491,39941,1732467159848, seqNum=-1] 2024-11-24T16:52:40,670 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T16:52:40,671 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49913, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T16:52:40,676 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 665 msec 2024-11-24T16:52:40,676 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732467160676, completionTime=-1 2024-11-24T16:52:40,676 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T16:52:40,676 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T16:52:40,678 INFO [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T16:52:40,678 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732467220678 2024-11-24T16:52:40,678 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732467280678 2024-11-24T16:52:40,678 INFO [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T16:52:40,678 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,34461,1732467159799-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:52:40,678 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,34461,1732467159799-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:52:40,678 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,34461,1732467159799-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:52:40,678 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e335232f7491:34461, period=300000, unit=MILLISECONDS is enabled. 
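Once hbase:meta is assigned, the master above reports joining the cluster with Number of RegionServers=1 and completes initialization. A client can observe the same figures through the Admin API. The sketch below is a standalone example assuming an hbase-site.xml on the classpath that points at this minicluster; it is not part of the test harness.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStatusCheck {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // assumes cluster settings on the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                ClusterMetrics metrics = admin.getClusterMetrics();
                System.out.println("active master: " + metrics.getMasterName());
                System.out.println("live regionservers: " + metrics.getLiveServerMetrics().size());
            }
        }
    }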
2024-11-24T16:52:40,678 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T16:52:40,679 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T16:52:40,680 DEBUG [master/e335232f7491:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T16:52:40,682 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.800sec 2024-11-24T16:52:40,682 INFO [master/e335232f7491:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T16:52:40,682 INFO [master/e335232f7491:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T16:52:40,682 INFO [master/e335232f7491:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T16:52:40,682 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T16:52:40,682 INFO [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T16:52:40,682 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,34461,1732467159799-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T16:52:40,682 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,34461,1732467159799-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T16:52:40,684 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T16:52:40,684 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T16:52:40,684 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,34461,1732467159799-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T16:52:40,766 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c10cfb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:52:40,767 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e335232f7491,34461,-1 for getting cluster id 2024-11-24T16:52:40,767 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T16:52:40,768 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '66af43bc-52af-4c06-9a3a-cdf76e5f138b' 2024-11-24T16:52:40,769 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T16:52:40,769 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "66af43bc-52af-4c06-9a3a-cdf76e5f138b" 2024-11-24T16:52:40,769 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a422eef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:52:40,769 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e335232f7491,34461,-1] 2024-11-24T16:52:40,770 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T16:52:40,770 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:52:40,771 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44366, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T16:52:40,772 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d866131, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:52:40,772 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T16:52:40,773 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e335232f7491,39941,1732467159848, seqNum=-1] 2024-11-24T16:52:40,774 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T16:52:40,775 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37610, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T16:52:40,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e335232f7491,34461,1732467159799 2024-11-24T16:52:40,777 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:52:40,779 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T16:52:40,779 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-24T16:52:40,779 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-24T16:52:40,780 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T16:52:40,780 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is e335232f7491,34461,1732467159799 2024-11-24T16:52:40,780 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@b4974af 2024-11-24T16:52:40,781 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T16:52:40,783 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44380, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T16:52:40,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34461 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T16:52:40,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34461 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
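The two TableDescriptorChecker warnings above fire because the table about to be created uses a very small MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes), presumably chosen by the test to force frequent rolls and flushes. A comparable descriptor can be built with the public client API as below; this is a sketch of the idea, not the test's own code, and the Admin instance is assumed to come from an open Connection as in the earlier sketch.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateSmallTable {
        static void createTable(Admin admin) throws Exception {
            TableDescriptor desc = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
                .setMaxFileSize(786432L)       // triggers the MAX_FILESIZE warning above
                .setMemStoreFlushSize(8192L)   // triggers the MEMSTORE_FLUSHSIZE warning above
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .build();
            admin.createTable(desc);
        }
    }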
2024-11-24T16:52:40,784 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34461 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T16:52:40,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34461 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T16:52:40,786 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T16:52:40,786 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:52:40,786 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34461 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-24T16:52:40,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34461 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T16:52:40,787 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T16:52:40,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37059 is added to blk_1073741835_1011 (size=395) 2024-11-24T16:52:40,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741835_1011 (size=395) 2024-11-24T16:52:40,796 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0649235964cb53366e714cb60b4a3e19, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef 2024-11-24T16:52:40,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741836_1012 (size=78) 2024-11-24T16:52:40,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37059 is added to blk_1073741836_1012 (size=78) 2024-11-24T16:52:40,802 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:52:40,802 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 0649235964cb53366e714cb60b4a3e19, disabling compactions & flushes 2024-11-24T16:52:40,803 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19. 2024-11-24T16:52:40,803 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19. 2024-11-24T16:52:40,803 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19. after waiting 0 ms 2024-11-24T16:52:40,803 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19. 2024-11-24T16:52:40,803 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19. 2024-11-24T16:52:40,803 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0649235964cb53366e714cb60b4a3e19: Waiting for close lock at 1732467160802Disabling compacts and flushes for region at 1732467160802Disabling writes for close at 1732467160803 (+1 ms)Writing region close event to WAL at 1732467160803Closed at 1732467160803 2024-11-24T16:52:40,804 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T16:52:40,805 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732467160805"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732467160805"}]},"ts":"1732467160805"} 2024-11-24T16:52:40,807 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
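The Put shown above writes the new region's info:regioninfo and info:state cells into hbase:meta. Those cells can be read back with an ordinary scan of the catalog table. The sketch below is illustrative only; the connection is assumed to be open as in the earlier sketch, and the row-key prefix simply mirrors the table name seen in the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaScan {
        static void printRegionStates(Connection conn) throws Exception {
            try (Table meta = conn.getTable(TableName.META_TABLE_NAME);
                 ResultScanner scanner = meta.getScanner(
                     new Scan().setRowPrefixFilter(
                         Bytes.toBytes("TestLogRolling-testLogRollOnPipelineRestart,")))) {
                for (Result r : scanner) {
                    byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
                    System.out.println(Bytes.toString(r.getRow()) + " -> "
                        + (state == null ? "?" : Bytes.toString(state)));
                }
            }
        }
    }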
2024-11-24T16:52:40,809 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T16:52:40,809 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732467160809"}]},"ts":"1732467160809"} 2024-11-24T16:52:40,811 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-24T16:52:40,811 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=0649235964cb53366e714cb60b4a3e19, ASSIGN}] 2024-11-24T16:52:40,813 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=0649235964cb53366e714cb60b4a3e19, ASSIGN 2024-11-24T16:52:40,814 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=0649235964cb53366e714cb60b4a3e19, ASSIGN; state=OFFLINE, location=e335232f7491,39941,1732467159848; forceNewPlan=false, retain=false 2024-11-24T16:52:40,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:40,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:52:40,965 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0649235964cb53366e714cb60b4a3e19, regionState=OPENING, regionLocation=e335232f7491,39941,1732467159848 2024-11-24T16:52:40,968 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=0649235964cb53366e714cb60b4a3e19, ASSIGN because future has completed 2024-11-24T16:52:40,968 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0649235964cb53366e714cb60b4a3e19, server=e335232f7491,39941,1732467159848}] 2024-11-24T16:52:41,125 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19. 2024-11-24T16:52:41,126 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0649235964cb53366e714cb60b4a3e19, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19.', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:52:41,126 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 0649235964cb53366e714cb60b4a3e19 2024-11-24T16:52:41,126 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:52:41,126 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0649235964cb53366e714cb60b4a3e19 2024-11-24T16:52:41,126 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0649235964cb53366e714cb60b4a3e19 2024-11-24T16:52:41,127 INFO [StoreOpener-0649235964cb53366e714cb60b4a3e19-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0649235964cb53366e714cb60b4a3e19 2024-11-24T16:52:41,129 INFO [StoreOpener-0649235964cb53366e714cb60b4a3e19-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0649235964cb53366e714cb60b4a3e19 columnFamilyName info 2024-11-24T16:52:41,129 DEBUG [StoreOpener-0649235964cb53366e714cb60b4a3e19-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:52:41,130 INFO [StoreOpener-0649235964cb53366e714cb60b4a3e19-1 {}] regionserver.HStore(327): Store=0649235964cb53366e714cb60b4a3e19/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:52:41,130 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0649235964cb53366e714cb60b4a3e19 2024-11-24T16:52:41,130 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/default/TestLogRolling-testLogRollOnPipelineRestart/0649235964cb53366e714cb60b4a3e19 2024-11-24T16:52:41,131 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/default/TestLogRolling-testLogRollOnPipelineRestart/0649235964cb53366e714cb60b4a3e19 2024-11-24T16:52:41,131 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0649235964cb53366e714cb60b4a3e19 2024-11-24T16:52:41,131 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0649235964cb53366e714cb60b4a3e19 2024-11-24T16:52:41,133 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0649235964cb53366e714cb60b4a3e19 2024-11-24T16:52:41,135 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/default/TestLogRolling-testLogRollOnPipelineRestart/0649235964cb53366e714cb60b4a3e19/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:52:41,136 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0649235964cb53366e714cb60b4a3e19; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705396, jitterRate=-0.10304313898086548}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T16:52:41,136 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0649235964cb53366e714cb60b4a3e19 2024-11-24T16:52:41,136 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0649235964cb53366e714cb60b4a3e19: Running coprocessor pre-open hook at 1732467161126Writing region info on filesystem at 1732467161126Initializing all the Stores at 1732467161127 (+1 
ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467161127Cleaning up temporary data from old regions at 1732467161131 (+4 ms)Running coprocessor post-open hooks at 1732467161136 (+5 ms)Region opened successfully at 1732467161136 2024-11-24T16:52:41,138 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19., pid=6, masterSystemTime=1732467161121 2024-11-24T16:52:41,140 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19. 2024-11-24T16:52:41,140 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19. 2024-11-24T16:52:41,141 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0649235964cb53366e714cb60b4a3e19, regionState=OPEN, openSeqNum=2, regionLocation=e335232f7491,39941,1732467159848 2024-11-24T16:52:41,144 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0649235964cb53366e714cb60b4a3e19, server=e335232f7491,39941,1732467159848 because future has completed 2024-11-24T16:52:41,148 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T16:52:41,148 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0649235964cb53366e714cb60b4a3e19, server=e335232f7491,39941,1732467159848 in 177 msec 2024-11-24T16:52:41,151 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T16:52:41,152 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=0649235964cb53366e714cb60b4a3e19, ASSIGN in 337 msec 2024-11-24T16:52:41,152 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T16:52:41,153 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732467161153"}]},"ts":"1732467161153"} 2024-11-24T16:52:41,155 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-24T16:52:41,156 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; 
CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T16:52:41,158 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 372 msec 2024-11-24T16:52:41,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:41,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:42,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:42,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:43,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:43,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:44,301 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T16:52:44,301 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T16:52:44,302 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T16:52:44,302 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-24T16:52:44,302 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T16:52:44,302 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T16:52:44,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:44,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:45,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:45,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:46,141 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T16:52:46,156 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:46,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:46,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:46,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:46,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:46,158 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:46,163 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:46,163 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:46,164 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:46,167 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:52:46,173 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T16:52:46,174 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-24T16:52:46,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:46,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:47,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:47,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:48,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:48,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:49,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:49,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:50,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34461 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T16:52:50,802 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-24T16:52:50,803 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-24T16:52:50,806 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T16:52:50,806 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19. 2024-11-24T16:52:50,809 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19., hostname=e335232f7491,39941,1732467159848, seqNum=2] 2024-11-24T16:52:50,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:50,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:51,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:51,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:52,812 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467160242 2024-11-24T16:52:52,813 WARN [ResponseProcessor for block BP-1845562487-172.17.0.2-1732467159106:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1845562487-172.17.0.2-1732467159106:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:52,813 WARN [ResponseProcessor for block BP-1845562487-172.17.0.2-1732467159106:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1845562487-172.17.0.2-1732467159106:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1845562487-172.17.0.2-1732467159106:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:45387,DS-a709033d-6a66-4826-be05-ebe8eec77087,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:52,813 WARN [DataStreamer for file /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/WALs/e335232f7491,34461,1732467159799/e335232f7491%2C34461%2C1732467159799.1732467159936 block BP-1845562487-172.17.0.2-1732467159106:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1845562487-172.17.0.2-1732467159106:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45387,DS-a709033d-6a66-4826-be05-ebe8eec77087,DISK], DatanodeInfoWithStorage[127.0.0.1:37059,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45387,DS-a709033d-6a66-4826-be05-ebe8eec77087,DISK]) is bad. 2024-11-24T16:52:52,813 WARN [ResponseProcessor for block BP-1845562487-172.17.0.2-1732467159106:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1845562487-172.17.0.2-1732467159106:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:52,814 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_846620625_22 at /127.0.0.1:49050 [Receiving block BP-1845562487-172.17.0.2-1732467159106:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45387:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49050 dst: /127.0.0.1:45387 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:52,815 WARN [DataStreamer for file /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.meta.1732467160630.meta block BP-1845562487-172.17.0.2-1732467159106:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1845562487-172.17.0.2-1732467159106:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37059,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK], DatanodeInfoWithStorage[127.0.0.1:45387,DS-a709033d-6a66-4826-be05-ebe8eec77087,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45387,DS-a709033d-6a66-4826-be05-ebe8eec77087,DISK]) is bad. 2024-11-24T16:52:52,815 WARN [PacketResponder: BP-1845562487-172.17.0.2-1732467159106:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:45387] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:52,815 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_846620625_22 at /127.0.0.1:58964 [Receiving block BP-1845562487-172.17.0.2-1732467159106:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37059:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58964 dst: /127.0.0.1:37059 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:52,815 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1113607199_22 at /127.0.0.1:49066 [Receiving block BP-1845562487-172.17.0.2-1732467159106:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45387:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49066 dst: /127.0.0.1:45387 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:52,815 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1113607199_22 at /127.0.0.1:59010 [Receiving block BP-1845562487-172.17.0.2-1732467159106:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37059:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59010 dst: /127.0.0.1:37059 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:52,815 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1113607199_22 at /127.0.0.1:59026 [Receiving block BP-1845562487-172.17.0.2-1732467159106:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37059:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59026 dst: /127.0.0.1:37059 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:52,816 WARN [DataStreamer for file /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467160242 block BP-1845562487-172.17.0.2-1732467159106:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1845562487-172.17.0.2-1732467159106:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45387,DS-a709033d-6a66-4826-be05-ebe8eec77087,DISK], DatanodeInfoWithStorage[127.0.0.1:37059,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45387,DS-a709033d-6a66-4826-be05-ebe8eec77087,DISK]) is bad. 2024-11-24T16:52:52,816 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1113607199_22 at /127.0.0.1:49072 [Receiving block BP-1845562487-172.17.0.2-1732467159106:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45387:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49072 dst: /127.0.0.1:45387 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:52,818 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e8ebafe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:52,818 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2835f29c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:52:52,818 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:52:52,819 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50fbcccf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:52:52,819 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@771a779f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/hadoop.log.dir/,STOPPED} 2024-11-24T16:52:52,820 WARN [BP-1845562487-172.17.0.2-1732467159106 heartbeating to localhost/127.0.0.1:34209 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:52:52,820 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T16:52:52,820 WARN [BP-1845562487-172.17.0.2-1732467159106 heartbeating to localhost/127.0.0.1:34209 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1845562487-172.17.0.2-1732467159106 (Datanode Uuid fc58d58b-ec27-4140-8039-694920e754c0) service to localhost/127.0.0.1:34209 2024-11-24T16:52:52,820 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:52:52,821 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/data/data3/current/BP-1845562487-172.17.0.2-1732467159106 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:52,821 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/data/data4/current/BP-1845562487-172.17.0.2-1732467159106 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:52,821 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:52:52,842 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:52:52,846 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:52:52,847 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:52:52,847 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:52:52,847 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:52:52,847 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@12f241e2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:52:52,848 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55cf8f7d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:52:52,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-24T16:52:52,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:52,967 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3bf6b78a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/java.io.tmpdir/jetty-localhost-44115-hadoop-hdfs-3_4_1-tests_jar-_-any-11722467157106796477/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:52,968 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1dd2ccd0{HTTP/1.1, (http/1.1)}{localhost:44115} 2024-11-24T16:52:52,968 INFO [Time-limited test {}] server.Server(415): Started @163930ms 2024-11-24T16:52:52,969 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T16:52:52,989 WARN [ResponseProcessor for block BP-1845562487-172.17.0.2-1732467159106:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1845562487-172.17.0.2-1732467159106:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:52,989 WARN [ResponseProcessor for block BP-1845562487-172.17.0.2-1732467159106:blk_1073741830_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1845562487-172.17.0.2-1732467159106:blk_1073741830_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:52,990 WARN [ResponseProcessor for block BP-1845562487-172.17.0.2-1732467159106:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1845562487-172.17.0.2-1732467159106:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:52,990 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1113607199_22 at /127.0.0.1:52570 [Receiving block BP-1845562487-172.17.0.2-1732467159106:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37059:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52570 dst: /127.0.0.1:37059 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:52,990 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1113607199_22 at /127.0.0.1:52562 [Receiving block BP-1845562487-172.17.0.2-1732467159106:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37059:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52562 dst: /127.0.0.1:37059 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:52,990 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_846620625_22 at /127.0.0.1:52564 [Receiving block BP-1845562487-172.17.0.2-1732467159106:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37059:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52564 dst: /127.0.0.1:37059 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:52,997 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e4cbcc2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:52,997 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3fab170c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:52:52,998 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:52:52,998 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5493c38f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:52:52,998 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371f8296{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/hadoop.log.dir/,STOPPED} 2024-11-24T16:52:52,999 WARN [BP-1845562487-172.17.0.2-1732467159106 heartbeating to localhost/127.0.0.1:34209 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:52:52,999 WARN [BP-1845562487-172.17.0.2-1732467159106 heartbeating to localhost/127.0.0.1:34209 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1845562487-172.17.0.2-1732467159106 (Datanode Uuid 646f4b75-dde7-4682-99de-65bd4c48f64b) service to localhost/127.0.0.1:34209 2024-11-24T16:52:52,999 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T16:52:52,999 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:52:53,000 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/data/data1/current/BP-1845562487-172.17.0.2-1732467159106 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:53,000 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/data/data2/current/BP-1845562487-172.17.0.2-1732467159106 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:53,000 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:52:53,013 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:52:53,017 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:52:53,018 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:52:53,018 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:52:53,018 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T16:52:53,019 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f4861a6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:52:53,019 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e87d64e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:52:53,071 WARN [Thread-1340 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T16:52:53,073 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf3e20453a3196f49 with lease ID 0x81286d3bcb2b0961: from storage DS-a709033d-6a66-4826-be05-ebe8eec77087 node DatanodeRegistration(127.0.0.1:41795, datanodeUuid=fc58d58b-ec27-4140-8039-694920e754c0, infoPort=44791, infoSecurePort=0, ipcPort=39239, storageInfo=lv=-57;cid=testClusterID;nsid=2089609732;c=1732467159106), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:53,073 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf3e20453a3196f49 with lease ID 0x81286d3bcb2b0961: from storage DS-02df4fdf-f444-4522-9ed8-da8f591567c0 node DatanodeRegistration(127.0.0.1:41795, datanodeUuid=fc58d58b-ec27-4140-8039-694920e754c0, infoPort=44791, infoSecurePort=0, ipcPort=39239, storageInfo=lv=-57;cid=testClusterID;nsid=2089609732;c=1732467159106), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:53,141 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@355539f9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/java.io.tmpdir/jetty-localhost-43721-hadoop-hdfs-3_4_1-tests_jar-_-any-2049565779469046247/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:53,142 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3d3d6246{HTTP/1.1, (http/1.1)}{localhost:43721} 2024-11-24T16:52:53,142 INFO [Time-limited test {}] server.Server(415): Started @164104ms 2024-11-24T16:52:53,143 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T16:52:53,238 WARN [Thread-1371 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T16:52:53,242 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b0c2c17844c5dba with lease ID 0x81286d3bcb2b0962: from storage DS-74588b0d-9974-4d3a-81c5-3b5ee0677613 node DatanodeRegistration(127.0.0.1:35405, datanodeUuid=646f4b75-dde7-4682-99de-65bd4c48f64b, infoPort=37467, infoSecurePort=0, ipcPort=33321, storageInfo=lv=-57;cid=testClusterID;nsid=2089609732;c=1732467159106), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:53,243 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b0c2c17844c5dba with lease ID 0x81286d3bcb2b0962: from storage DS-1eb35a8a-9ccb-4250-bb64-52aa2fa3e186 node DatanodeRegistration(127.0.0.1:35405, datanodeUuid=646f4b75-dde7-4682-99de-65bd4c48f64b, infoPort=37467, infoSecurePort=0, ipcPort=33321, storageInfo=lv=-57;cid=testClusterID;nsid=2089609732;c=1732467159106), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:53,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:52:53,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:54,163 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-24T16:52:54,166 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-24T16:52:54,168 ERROR [FSHLog-0-hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef-prefix:e335232f7491,39941,1732467159848 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37059,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:54,168 WARN [FSHLog-0-hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef-prefix:e335232f7491,39941,1732467159848 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37059,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:54,168 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e335232f7491%2C39941%2C1732467159848:(num 1732467160242) roll requested 2024-11-24T16:52:54,168 INFO [regionserver/e335232f7491:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C39941%2C1732467159848.1732467174168 2024-11-24T16:52:54,177 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467160242 newFile=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467174168 2024-11-24T16:52:54,177 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:54,177 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:54,177 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:54,177 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:54,177 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:54,178 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467160242 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467174168 2024-11-24T16:52:54,178 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37059,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:54,178 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37059,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:54,178 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467160242 2024-11-24T16:52:54,178 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44791:44791),(127.0.0.1/127.0.0.1:37467:37467)] 2024-11-24T16:52:54,179 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467160242 is not closed yet, will try archiving it next time 2024-11-24T16:52:54,179 WARN [IPC Server handler 4 on default port 34209 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467160242 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-11-24T16:52:54,179 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467160242 after 1ms 2024-11-24T16:52:54,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:54,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:52:55,073 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-24T16:52:55,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:55,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:56,182 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-24T16:52:56,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:56,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:57,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:57,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:58,180 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467160242 after 4002ms 2024-11-24T16:52:58,185 WARN [ResponseProcessor for block BP-1845562487-172.17.0.2-1732467159106:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1845562487-172.17.0.2-1732467159106:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1845562487-172.17.0.2-1732467159106:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:35405,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:58,186 WARN [DataStreamer for file /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467174168 block BP-1845562487-172.17.0.2-1732467159106:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1845562487-172.17.0.2-1732467159106:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41795,DS-a709033d-6a66-4826-be05-ebe8eec77087,DISK], DatanodeInfoWithStorage[127.0.0.1:35405,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35405,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK]) is bad. 
2024-11-24T16:52:58,186 WARN [PacketResponder: BP-1845562487-172.17.0.2-1732467159106:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35405] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:58,187 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1113607199_22 at /127.0.0.1:42924 [Receiving block BP-1845562487-172.17.0.2-1732467159106:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42924 dst: /127.0.0.1:41795 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:58,187 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1113607199_22 at /127.0.0.1:40210 [Receiving block BP-1845562487-172.17.0.2-1732467159106:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35405:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40210 dst: /127.0.0.1:35405 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:52:58,189 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@355539f9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:58,189 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d3d6246{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:52:58,190 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:52:58,190 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e87d64e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:52:58,190 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f4861a6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/hadoop.log.dir/,STOPPED} 2024-11-24T16:52:58,191 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T16:52:58,191 WARN [BP-1845562487-172.17.0.2-1732467159106 heartbeating to localhost/127.0.0.1:34209 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:52:58,191 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:52:58,191 WARN [BP-1845562487-172.17.0.2-1732467159106 heartbeating to localhost/127.0.0.1:34209 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1845562487-172.17.0.2-1732467159106 (Datanode Uuid 646f4b75-dde7-4682-99de-65bd4c48f64b) service to localhost/127.0.0.1:34209 2024-11-24T16:52:58,192 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/data/data1/current/BP-1845562487-172.17.0.2-1732467159106 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:58,193 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/data/data2/current/BP-1845562487-172.17.0.2-1732467159106 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:58,193 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:52:58,207 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:52:58,211 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:52:58,213 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:52:58,213 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:52:58,213 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:52:58,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67d70e61{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:52:58,214 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@290ac13e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:52:58,341 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5036ecf5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/java.io.tmpdir/jetty-localhost-38201-hadoop-hdfs-3_4_1-tests_jar-_-any-14970683007892159701/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:58,342 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2647e756{HTTP/1.1, (http/1.1)}{localhost:38201} 2024-11-24T16:52:58,342 INFO [Time-limited test {}] server.Server(415): Started @169304ms 2024-11-24T16:52:58,344 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T16:52:58,373 WARN [ResponseProcessor for block BP-1845562487-172.17.0.2-1732467159106:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1845562487-172.17.0.2-1732467159106:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:58,374 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1113607199_22 at /127.0.0.1:42950 [Receiving block BP-1845562487-172.17.0.2-1732467159106:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42950 dst: /127.0.0.1:41795 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:52:58,380 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3bf6b78a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:58,381 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1dd2ccd0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:52:58,381 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:52:58,381 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55cf8f7d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:52:58,381 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@12f241e2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/hadoop.log.dir/,STOPPED} 2024-11-24T16:52:58,382 WARN [BP-1845562487-172.17.0.2-1732467159106 heartbeating to localhost/127.0.0.1:34209 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:52:58,382 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T16:52:58,382 WARN [BP-1845562487-172.17.0.2-1732467159106 heartbeating to localhost/127.0.0.1:34209 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1845562487-172.17.0.2-1732467159106 (Datanode Uuid fc58d58b-ec27-4140-8039-694920e754c0) service to localhost/127.0.0.1:34209 2024-11-24T16:52:58,382 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:52:58,384 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/data/data3/current/BP-1845562487-172.17.0.2-1732467159106 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:58,384 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/data/data4/current/BP-1845562487-172.17.0.2-1732467159106 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:52:58,384 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:52:58,397 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:52:58,403 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:52:58,408 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:52:58,408 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:52:58,408 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T16:52:58,409 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@652ca842{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:52:58,409 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3172a7c6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:52:58,458 WARN [Thread-1414 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T16:52:58,461 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2721794c6d0c99b8 with lease ID 0x81286d3bcb2b0963: from storage DS-74588b0d-9974-4d3a-81c5-3b5ee0677613 node DatanodeRegistration(127.0.0.1:46767, datanodeUuid=646f4b75-dde7-4682-99de-65bd4c48f64b, infoPort=43665, infoSecurePort=0, ipcPort=42217, storageInfo=lv=-57;cid=testClusterID;nsid=2089609732;c=1732467159106), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:58,461 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2721794c6d0c99b8 with lease ID 0x81286d3bcb2b0963: from storage DS-1eb35a8a-9ccb-4250-bb64-52aa2fa3e186 node DatanodeRegistration(127.0.0.1:46767, datanodeUuid=646f4b75-dde7-4682-99de-65bd4c48f64b, infoPort=43665, infoSecurePort=0, ipcPort=42217, storageInfo=lv=-57;cid=testClusterID;nsid=2089609732;c=1732467159106), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:58,526 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1ed6e4b8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/java.io.tmpdir/jetty-localhost-46497-hadoop-hdfs-3_4_1-tests_jar-_-any-4866272089971226414/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:52:58,526 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@929fb8a{HTTP/1.1, (http/1.1)}{localhost:46497} 2024-11-24T16:52:58,526 INFO [Time-limited test {}] server.Server(415): Started @169488ms 2024-11-24T16:52:58,528 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T16:52:58,630 WARN [Thread-1445 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T16:52:58,633 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa3886ce169112814 with lease ID 0x81286d3bcb2b0964: from storage DS-a709033d-6a66-4826-be05-ebe8eec77087 node DatanodeRegistration(127.0.0.1:36919, datanodeUuid=fc58d58b-ec27-4140-8039-694920e754c0, infoPort=33633, infoSecurePort=0, ipcPort=33327, storageInfo=lv=-57;cid=testClusterID;nsid=2089609732;c=1732467159106), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:58,633 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa3886ce169112814 with lease ID 0x81286d3bcb2b0964: from storage DS-02df4fdf-f444-4522-9ed8-da8f591567c0 node DatanodeRegistration(127.0.0.1:36919, datanodeUuid=fc58d58b-ec27-4140-8039-694920e754c0, infoPort=33633, infoSecurePort=0, ipcPort=33327, storageInfo=lv=-57;cid=testClusterID;nsid=2089609732;c=1732467159106), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:52:58,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:52:58,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:59,550 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-24T16:52:59,552 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-24T16:52:59,553 ERROR [FSHLog-0-hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef-prefix:e335232f7491,39941,1732467159848 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41795,DS-a709033d-6a66-4826-be05-ebe8eec77087,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:59,553 WARN [FSHLog-0-hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef-prefix:e335232f7491,39941,1732467159848 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41795,DS-a709033d-6a66-4826-be05-ebe8eec77087,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:59,553 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e335232f7491%2C39941%2C1732467159848:(num 1732467174168) roll requested 2024-11-24T16:52:59,554 INFO [regionserver/e335232f7491:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C39941%2C1732467159848.1732467179553 2024-11-24T16:52:59,559 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467174168 newFile=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467179553 2024-11-24T16:52:59,559 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:59,559 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:59,559 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:59,559 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:59,559 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:52:59,559 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467174168 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467179553 2024-11-24T16:52:59,560 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41795,DS-a709033d-6a66-4826-be05-ebe8eec77087,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:52:59,560 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41795,DS-a709033d-6a66-4826-be05-ebe8eec77087,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:52:59,560 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467174168 2024-11-24T16:52:59,560 WARN [IPC Server handler 2 on default port 34209 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467174168 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-24T16:52:59,561 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467174168 after 1ms 2024-11-24T16:52:59,561 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33633:33633),(127.0.0.1/127.0.0.1:43665:43665)] 2024-11-24T16:52:59,561 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467174168 is not closed yet, will try archiving it next time 2024-11-24T16:52:59,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:52:59,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:00,460 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-24T16:53:00,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:00,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:01,562 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C39941%2C1732467159848.1732467181562 2024-11-24T16:53:01,568 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467179553 newFile=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467181562 2024-11-24T16:53:01,568 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:01,568 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:01,568 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:01,568 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:01,569 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:01,569 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467179553 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467181562 2024-11-24T16:53:01,570 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43665:43665),(127.0.0.1/127.0.0.1:33633:33633)] 2024-11-24T16:53:01,570 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467174168 is not closed yet, will try archiving it next time 2024-11-24T16:53:01,570 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467179553 is not closed yet, will try archiving it next time 2024-11-24T16:53:01,570 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467160242 2024-11-24T16:53:01,570 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467160242 2024-11-24T16:53:01,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46767 is added to blk_1073741838_1019 (size=1264) 2024-11-24T16:53:01,571 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467160242 after 1ms 2024-11-24T16:53:01,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36919 is added to blk_1073741838_1019 (size=1264) 2024-11-24T16:53:01,571 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467160242 2024-11-24T16:53:01,571 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467174168 is not closed yet, will try archiving it next time 2024-11-24T16:53:01,580 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732467161137/Put/vlen=218/seqid=0] 2024-11-24T16:53:01,580 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732467170810/Put/vlen=1045/seqid=0] 2024-11-24T16:53:01,580 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467160242 2024-11-24T16:53:01,580 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467174168 2024-11-24T16:53:01,580 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467174168 2024-11-24T16:53:01,580 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467174168 after 0ms 2024-11-24T16:53:01,580 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467174168 2024-11-24T16:53:01,583 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732467174167/Put/vlen=1045/seqid=0] 2024-11-24T16:53:01,583 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732467176183/Put/vlen=1045/seqid=0] 2024-11-24T16:53:01,584 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467174168 2024-11-24T16:53:01,584 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467179553 2024-11-24T16:53:01,584 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467179553 2024-11-24T16:53:01,584 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467179553 after 0ms 2024-11-24T16:53:01,584 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467179553 2024-11-24T16:53:01,587 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732467179553/Put/vlen=1045/seqid=0] 2024-11-24T16:53:01,587 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467181562 2024-11-24T16:53:01,587 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467181562 2024-11-24T16:53:01,587 WARN [IPC Server handler 0 on default port 34209 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467181562 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-24T16:53:01,587 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467181562 after 0ms 2024-11-24T16:53:01,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:01,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:02,463 WARN [ResponseProcessor for block BP-1845562487-172.17.0.2-1732467159106:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1845562487-172.17.0.2-1732467159106:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
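The RecoverLeaseFSUtils and NameNode lines above show the close-WAL-writer thread polling lease recovery: attempt=0 fails while the NameNode reports "Lease recovery is in progress", and a later attempt succeeds roughly 4 seconds afterwards. The following standalone Java sketch mirrors that polling pattern using the public HDFS client calls that appear in these stack traces; the NameNode URI, WAL path and retry bound are placeholders, not values from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class WalLeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode and WAL path; substitute the file whose lease should be recovered.
    Path wal = new Path("hdfs://localhost:8020/hbase/WALs/example-regionserver/example.wal");
    try (DistributedFileSystem dfs = (DistributedFileSystem) wal.getFileSystem(conf)) {
      // recoverLease() returns true only if the file is already closed; otherwise the NameNode
      // starts block recovery, which is what produces the "RecoveryId = ..." WARN lines above.
      boolean closed = dfs.recoverLease(wal);
      for (int attempt = 1; !closed && attempt <= 5; attempt++) {
        Thread.sleep(4000L);             // ~4s between attempts, matching the log timestamps
        closed = dfs.isFileClosed(wal);  // the same check RecoverLeaseFSUtils invokes via reflection
      }
      System.out.println("lease recovered: " + closed);
    }
  }
}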
2024-11-24T16:53:02,462 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_846620625_22 at /127.0.0.1:54240 [Receiving block BP-1845562487-172.17.0.2-1732467159106:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:46767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54240 dst: /127.0.0.1:46767 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:46767 remote=/127.0.0.1:54240]. Total timeout mills is 60000, 59105 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:53:02,463 WARN [DataStreamer for file /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467181562 block BP-1845562487-172.17.0.2-1732467159106:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1845562487-172.17.0.2-1732467159106:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46767,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK], DatanodeInfoWithStorage[127.0.0.1:36919,DS-a709033d-6a66-4826-be05-ebe8eec77087,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46767,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK]) is bad. 
2024-11-24T16:53:02,463 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_846620625_22 at /127.0.0.1:38998 [Receiving block BP-1845562487-172.17.0.2-1732467159106:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:36919:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38998 dst: /127.0.0.1:36919 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:53:02,464 WARN [DataStreamer for file /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467181562 block BP-1845562487-172.17.0.2-1732467159106:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1845562487-172.17.0.2-1732467159106:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:53:02,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46767 is added to blk_1073741839_1022 (size=85) 2024-11-24T16:53:02,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36919 is added to blk_1073741839_1022 (size=85) 2024-11-24T16:53:02,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:02,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:03,562 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467174168 after 4002ms 2024-11-24T16:53:03,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:03,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:04,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:04,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:05,588 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467181562 after 4001ms 2024-11-24T16:53:05,588 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467181562 2024-11-24T16:53:05,592 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467181562 2024-11-24T16:53:05,592 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-24T16:53:05,593 ERROR [FSHLog-0-hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef-prefix:e335232f7491,39941,1732467159848.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37059,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:53:05,593 WARN [FSHLog-0-hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef-prefix:e335232f7491,39941,1732467159848.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37059,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
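The "Flushing 1588230740 4/4 column families" entry above starts a flush of the hbase:meta region; the lines that follow record memstore data for the info, ns and table families being written to .tmp files and committed as HFiles. From client code the equivalent operation can be requested through Admin#flush, sketched below; the connection setup is a placeholder and this is not necessarily the code path the test itself uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushMetaSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Region 1588230740 in the log is hbase:meta; flushing the table writes its memstores
      // out as HFiles under data/hbase/meta/1588230740, as the .tmp/info, .tmp/ns and
      // .tmp/table entries above show.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}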
2024-11-24T16:53:05,593 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e335232f7491%2C39941%2C1732467159848.meta:.meta(num 1732467160630) roll requested 2024-11-24T16:53:05,593 INFO [regionserver/e335232f7491:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C39941%2C1732467159848.meta.1732467185593.meta 2024-11-24T16:53:05,598 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:05,598 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:05,598 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:05,598 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:05,598 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:05,598 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.meta.1732467160630.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.meta.1732467185593.meta 2024-11-24T16:53:05,598 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37059,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:53:05,599 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37059,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
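In the entry above the meta WAL roll is requested internally by the WAL roller after appendAndSync fails against the bad datanode. The same roll can be requested from a client with Admin#rollWALWriter, sketched below for illustration only; the server name fields are copied from the regionserver identifier used throughout these lines (e335232f7491,39941,1732467159848), and the connection setup is a placeholder.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class RollWalSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Host, port and start code as they appear in the regionserver name in this log.
      ServerName rs = ServerName.valueOf("e335232f7491", 39941, 1732467159848L);
      admin.rollWALWriter(rs);  // asks that regionserver to roll its WAL, like the roll requested above
    }
  }
}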
2024-11-24T16:53:05,599 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.meta.1732467160630.meta 2024-11-24T16:53:05,599 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43665:43665),(127.0.0.1/127.0.0.1:33633:33633)] 2024-11-24T16:53:05,599 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.meta.1732467160630.meta is not closed yet, will try archiving it next time 2024-11-24T16:53:05,599 WARN [IPC Server handler 4 on default port 34209 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.meta.1732467160630.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1013 2024-11-24T16:53:05,599 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.meta.1732467160630.meta after 0ms 2024-11-24T16:53:05,614 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740/.tmp/info/bcf0422815d34275a9c3d804c0f9a989 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19./info:regioninfo/1732467161141/Put/seqid=0 2024-11-24T16:53:05,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46767 is added to blk_1073741841_1025 (size=7125) 2024-11-24T16:53:05,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36919 is added to blk_1073741841_1025 (size=7125) 2024-11-24T16:53:05,620 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740/.tmp/info/bcf0422815d34275a9c3d804c0f9a989 2024-11-24T16:53:05,639 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740/.tmp/ns/1d0834e295ba4ec38efebf42f51067d5 is 43, key is default/ns:d/1732467160671/Put/seqid=0 2024-11-24T16:53:05,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46767 is added to blk_1073741842_1026 (size=5153) 2024-11-24T16:53:05,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36919 is added to blk_1073741842_1026 (size=5153) 2024-11-24T16:53:05,645 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740/.tmp/ns/1d0834e295ba4ec38efebf42f51067d5 2024-11-24T16:53:05,665 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740/.tmp/table/1360b94a62714da1bbb8019198db41a1 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732467161153/Put/seqid=0 2024-11-24T16:53:05,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46767 is added to blk_1073741843_1027 (size=5438) 2024-11-24T16:53:05,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36919 is added to blk_1073741843_1027 (size=5438) 2024-11-24T16:53:05,671 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740/.tmp/table/1360b94a62714da1bbb8019198db41a1 2024-11-24T16:53:05,676 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740/.tmp/info/bcf0422815d34275a9c3d804c0f9a989 as hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740/info/bcf0422815d34275a9c3d804c0f9a989 2024-11-24T16:53:05,681 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740/info/bcf0422815d34275a9c3d804c0f9a989, entries=10, sequenceid=11, filesize=7.0 K 2024-11-24T16:53:05,682 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740/.tmp/ns/1d0834e295ba4ec38efebf42f51067d5 as hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740/ns/1d0834e295ba4ec38efebf42f51067d5 2024-11-24T16:53:05,687 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740/ns/1d0834e295ba4ec38efebf42f51067d5, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T16:53:05,688 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740/.tmp/table/1360b94a62714da1bbb8019198db41a1 as hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740/table/1360b94a62714da1bbb8019198db41a1 2024-11-24T16:53:05,693 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740/table/1360b94a62714da1bbb8019198db41a1, entries=2, sequenceid=11, filesize=5.3 K 2024-11-24T16:53:05,694 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 102ms, sequenceid=11, compaction requested=false 2024-11-24T16:53:05,694 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-24T16:53:05,694 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 0649235964cb53366e714cb60b4a3e19 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-24T16:53:05,695 ERROR [FSHLog-0-hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef-prefix:e335232f7491,39941,1732467159848 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1845562487-172.17.0.2-1732467159106:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:53:05,695 WARN [FSHLog-0-hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef-prefix:e335232f7491,39941,1732467159848 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1845562487-172.17.0.2-1732467159106:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T16:53:05,696 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e335232f7491%2C39941%2C1732467159848:(num 1732467181562) roll requested 2024-11-24T16:53:05,696 INFO [regionserver/e335232f7491:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C39941%2C1732467159848.1732467185696 2024-11-24T16:53:05,701 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467181562 newFile=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467185696 2024-11-24T16:53:05,701 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:05,701 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:05,701 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:05,701 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:05,701 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:05,701 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467181562 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467185696 2024-11-24T16:53:05,701 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1845562487-172.17.0.2-1732467159106:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:53:05,702 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1845562487-172.17.0.2-1732467159106:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:53:05,702 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467181562 2024-11-24T16:53:05,702 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467181562 after 0ms 2024-11-24T16:53:05,704 DEBUG [regionserver/e335232f7491:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43665:43665),(127.0.0.1/127.0.0.1:33633:33633)] 2024-11-24T16:53:05,705 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.1732467181562 to hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/oldWALs/e335232f7491%2C39941%2C1732467159848.1732467181562 2024-11-24T16:53:05,726 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/default/TestLogRolling-testLogRollOnPipelineRestart/0649235964cb53366e714cb60b4a3e19/.tmp/info/af724498d75f4b30a6a21e2a2adc635a is 1080, key is row1002/info:/1732467170810/Put/seqid=0 2024-11-24T16:53:05,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46767 is added to blk_1073741845_1029 (size=9270) 2024-11-24T16:53:05,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36919 is added to blk_1073741845_1029 (size=9270) 2024-11-24T16:53:05,731 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/default/TestLogRolling-testLogRollOnPipelineRestart/0649235964cb53366e714cb60b4a3e19/.tmp/info/af724498d75f4b30a6a21e2a2adc635a 2024-11-24T16:53:05,738 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/default/TestLogRolling-testLogRollOnPipelineRestart/0649235964cb53366e714cb60b4a3e19/.tmp/info/af724498d75f4b30a6a21e2a2adc635a as hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/default/TestLogRolling-testLogRollOnPipelineRestart/0649235964cb53366e714cb60b4a3e19/info/af724498d75f4b30a6a21e2a2adc635a 2024-11-24T16:53:05,744 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/default/TestLogRolling-testLogRollOnPipelineRestart/0649235964cb53366e714cb60b4a3e19/info/af724498d75f4b30a6a21e2a2adc635a, entries=4, sequenceid=8, filesize=9.1 K 2024-11-24T16:53:05,745 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 0649235964cb53366e714cb60b4a3e19 in 51ms, sequenceid=8, compaction requested=false 2024-11-24T16:53:05,745 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 0649235964cb53366e714cb60b4a3e19: 2024-11-24T16:53:05,751 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T16:53:05,751 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T16:53:05,751 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:53:05,751 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:53:05,752 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:53:05,752 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T16:53:05,752 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T16:53:05,752 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1049238139, stopped=false 2024-11-24T16:53:05,752 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e335232f7491,34461,1732467159799 2024-11-24T16:53:05,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T16:53:05,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T16:53:05,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:53:05,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:53:05,754 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T16:53:05,754 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T16:53:05,754 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:53:05,754 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:53:05,754 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:53:05,755 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e335232f7491,39941,1732467159848' ***** 2024-11-24T16:53:05,755 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T16:53:05,755 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:53:05,755 INFO [RS:0;e335232f7491:39941 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T16:53:05,755 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T16:53:05,755 INFO [RS:0;e335232f7491:39941 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T16:53:05,755 INFO [RS:0;e335232f7491:39941 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T16:53:05,755 INFO [RS:0;e335232f7491:39941 {}] regionserver.HRegionServer(3091): Received CLOSE for 0649235964cb53366e714cb60b4a3e19 2024-11-24T16:53:05,756 INFO [RS:0;e335232f7491:39941 {}] regionserver.HRegionServer(959): stopping server e335232f7491,39941,1732467159848 2024-11-24T16:53:05,756 INFO [RS:0;e335232f7491:39941 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T16:53:05,756 INFO [RS:0;e335232f7491:39941 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e335232f7491:39941. 2024-11-24T16:53:05,756 DEBUG [RS:0;e335232f7491:39941 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:53:05,756 DEBUG [RS:0;e335232f7491:39941 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:53:05,756 INFO [RS:0;e335232f7491:39941 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-24T16:53:05,756 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0649235964cb53366e714cb60b4a3e19, disabling compactions & flushes 2024-11-24T16:53:05,756 INFO [RS:0;e335232f7491:39941 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T16:53:05,756 INFO [RS:0;e335232f7491:39941 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T16:53:05,756 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19. 2024-11-24T16:53:05,756 INFO [RS:0;e335232f7491:39941 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T16:53:05,756 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19. 2024-11-24T16:53:05,756 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19. after waiting 0 ms 2024-11-24T16:53:05,756 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19. 2024-11-24T16:53:05,756 INFO [RS:0;e335232f7491:39941 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T16:53:05,756 DEBUG [RS:0;e335232f7491:39941 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 0649235964cb53366e714cb60b4a3e19=TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19.} 2024-11-24T16:53:05,756 DEBUG [RS:0;e335232f7491:39941 {}] regionserver.HRegionServer(1351): Waiting on 0649235964cb53366e714cb60b4a3e19, 1588230740 2024-11-24T16:53:05,756 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T16:53:05,756 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T16:53:05,756 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T16:53:05,757 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T16:53:05,757 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T16:53:05,765 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/default/TestLogRolling-testLogRollOnPipelineRestart/0649235964cb53366e714cb60b4a3e19/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-24T16:53:05,766 INFO 
[RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19. 2024-11-24T16:53:05,766 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0649235964cb53366e714cb60b4a3e19: Waiting for close lock at 1732467185756Running coprocessor pre-close hooks at 1732467185756Disabling compacts and flushes for region at 1732467185756Disabling writes for close at 1732467185756Writing region close event to WAL at 1732467185760 (+4 ms)Running coprocessor post-close hooks at 1732467185766 (+6 ms)Closed at 1732467185766 2024-11-24T16:53:05,766 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732467160783.0649235964cb53366e714cb60b4a3e19. 2024-11-24T16:53:05,768 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T16:53:05,769 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T16:53:05,769 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T16:53:05,769 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732467185756Running coprocessor pre-close hooks at 1732467185756Disabling compacts and flushes for region at 1732467185756Disabling writes for close at 1732467185757 (+1 ms)Writing region close event to WAL at 1732467185764 (+7 ms)Running coprocessor post-close hooks at 1732467185769 (+5 ms)Closed at 1732467185769 2024-11-24T16:53:05,769 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T16:53:05,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:05,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:05,957 INFO [RS:0;e335232f7491:39941 {}] regionserver.HRegionServer(976): stopping server e335232f7491,39941,1732467159848; all regions closed. 2024-11-24T16:53:05,957 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:05,957 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:05,957 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:05,957 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:05,958 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:05,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36919 is added to blk_1073741840_1023 (size=825) 2024-11-24T16:53:05,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46767 is added to blk_1073741840_1023 (size=825) 2024-11-24T16:53:06,109 INFO [regionserver/e335232f7491:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T16:53:06,194 INFO [regionserver/e335232f7491:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T16:53:06,194 INFO [regionserver/e335232f7491:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T16:53:06,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:06,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:07,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:07,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:08,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:08,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:09,600 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.meta.1732467160630.meta after 4001ms 2024-11-24T16:53:09,608 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/WALs/e335232f7491,39941,1732467159848/e335232f7491%2C39941%2C1732467159848.meta.1732467160630.meta to hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/oldWALs/e335232f7491%2C39941%2C1732467159848.meta.1732467160630.meta 2024-11-24T16:53:09,612 DEBUG [RS:0;e335232f7491:39941 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/oldWALs 2024-11-24T16:53:09,612 INFO [RS:0;e335232f7491:39941 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e335232f7491%2C39941%2C1732467159848.meta:.meta(num 1732467185593) 2024-11-24T16:53:09,613 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:09,613 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:09,613 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:09,613 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:09,614 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:09,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36919 is added to blk_1073741844_1028 (size=1162) 2024-11-24T16:53:09,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46767 is added to blk_1073741844_1028 (size=1162) 
2024-11-24T16:53:09,624 DEBUG [RS:0;e335232f7491:39941 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/oldWALs 2024-11-24T16:53:09,624 INFO [RS:0;e335232f7491:39941 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e335232f7491%2C39941%2C1732467159848:(num 1732467185696) 2024-11-24T16:53:09,624 DEBUG [RS:0;e335232f7491:39941 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:53:09,624 INFO [RS:0;e335232f7491:39941 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T16:53:09,624 INFO [RS:0;e335232f7491:39941 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T16:53:09,625 INFO [RS:0;e335232f7491:39941 {}] hbase.ChoreService(370): Chore service for: regionserver/e335232f7491:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-24T16:53:09,625 INFO [RS:0;e335232f7491:39941 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T16:53:09,625 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T16:53:09,625 INFO [RS:0;e335232f7491:39941 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39941 2024-11-24T16:53:09,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T16:53:09,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e335232f7491,39941,1732467159848 2024-11-24T16:53:09,627 INFO [RS:0;e335232f7491:39941 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T16:53:09,629 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e335232f7491,39941,1732467159848] 2024-11-24T16:53:09,630 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e335232f7491,39941,1732467159848 already deleted, retry=false 2024-11-24T16:53:09,630 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e335232f7491,39941,1732467159848 expired; onlineServers=0 2024-11-24T16:53:09,630 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e335232f7491,34461,1732467159799' ***** 2024-11-24T16:53:09,630 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T16:53:09,630 INFO [M:0;e335232f7491:34461 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T16:53:09,630 INFO [M:0;e335232f7491:34461 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T16:53:09,630 DEBUG [M:0;e335232f7491:34461 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T16:53:09,630 DEBUG [M:0;e335232f7491:34461 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T16:53:09,630 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T16:53:09,630 DEBUG [master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467160016 {}] cleaner.HFileCleaner(306): Exit Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467160016,5,FailOnTimeoutGroup] 2024-11-24T16:53:09,630 INFO [M:0;e335232f7491:34461 {}] hbase.ChoreService(370): Chore service for: master/e335232f7491:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T16:53:09,630 INFO [M:0;e335232f7491:34461 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T16:53:09,631 DEBUG [M:0;e335232f7491:34461 {}] master.HMaster(1795): Stopping service threads 2024-11-24T16:53:09,631 INFO [M:0;e335232f7491:34461 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T16:53:09,631 INFO [M:0;e335232f7491:34461 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T16:53:09,631 INFO [M:0;e335232f7491:34461 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T16:53:09,631 DEBUG [master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467160016 {}] cleaner.HFileCleaner(306): Exit Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467160016,5,FailOnTimeoutGroup] 2024-11-24T16:53:09,631 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T16:53:09,633 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-24T16:53:09,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T16:53:09,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:53:09,638 DEBUG [M:0;e335232f7491:34461 {}] zookeeper.ZKUtil(347): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T16:53:09,638 WARN [M:0;e335232f7491:34461 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T16:53:09,639 INFO [M:0;e335232f7491:34461 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/.lastflushedseqids 2024-11-24T16:53:09,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46767 is added to blk_1073741846_1030 (size=139) 2024-11-24T16:53:09,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36919 is added to blk_1073741846_1030 (size=139) 2024-11-24T16:53:09,647 INFO [M:0;e335232f7491:34461 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T16:53:09,647 INFO [M:0;e335232f7491:34461 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T16:53:09,647 DEBUG [M:0;e335232f7491:34461 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T16:53:09,647 INFO [M:0;e335232f7491:34461 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:53:09,647 DEBUG [M:0;e335232f7491:34461 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:53:09,647 DEBUG [M:0;e335232f7491:34461 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T16:53:09,647 DEBUG [M:0;e335232f7491:34461 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:53:09,647 INFO [M:0;e335232f7491:34461 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-24T16:53:09,648 ERROR [FSHLog-0-hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData-prefix:e335232f7491,34461,1732467159799 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37059,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:53:09,648 WARN [FSHLog-0-hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData-prefix:e335232f7491,34461,1732467159799 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37059,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:53:09,648 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog e335232f7491%2C34461%2C1732467159799:(num 1732467159936) roll requested 2024-11-24T16:53:09,648 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C34461%2C1732467159799.1732467189648 2024-11-24T16:53:09,661 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:09,661 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:09,661 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:09,662 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:09,662 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:09,662 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/WALs/e335232f7491,34461,1732467159799/e335232f7491%2C34461%2C1732467159799.1732467159936 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/WALs/e335232f7491,34461,1732467159799/e335232f7491%2C34461%2C1732467159799.1732467189648 2024-11-24T16:53:09,662 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37059,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:53:09,662 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37059,DS-74588b0d-9974-4d3a-81c5-3b5ee0677613,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T16:53:09,663 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/WALs/e335232f7491,34461,1732467159799/e335232f7491%2C34461%2C1732467159799.1732467159936 2024-11-24T16:53:09,663 WARN [IPC Server handler 3 on default port 34209 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/WALs/e335232f7491,34461,1732467159799/e335232f7491%2C34461%2C1732467159799.1732467159936 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1014 2024-11-24T16:53:09,663 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/WALs/e335232f7491,34461,1732467159799/e335232f7491%2C34461%2C1732467159799.1732467159936 after 0ms 2024-11-24T16:53:09,671 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43665:43665),(127.0.0.1/127.0.0.1:33633:33633)] 2024-11-24T16:53:09,671 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/WALs/e335232f7491,34461,1732467159799/e335232f7491%2C34461%2C1732467159799.1732467159936 is not closed yet, will try archiving it next time 2024-11-24T16:53:09,689 DEBUG [M:0;e335232f7491:34461 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3bc267e249014e7695a5d2c52707202e is 82, key is hbase:meta,,1/info:regioninfo/1732467160655/Put/seqid=0 2024-11-24T16:53:09,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46767 is added to blk_1073741848_1033 (size=5672) 2024-11-24T16:53:09,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36919 is added to blk_1073741848_1033 (size=5672) 2024-11-24T16:53:09,699 INFO [M:0;e335232f7491:34461 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3bc267e249014e7695a5d2c52707202e 2024-11-24T16:53:09,721 DEBUG [M:0;e335232f7491:34461 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dc68a781e2de43aba203a3ab9449cad8 is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732467161157/Put/seqid=0 2024-11-24T16:53:09,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36919 is added to blk_1073741849_1034 (size=6119) 2024-11-24T16:53:09,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46767 is added to blk_1073741849_1034 (size=6119) 2024-11-24T16:53:09,726 INFO [M:0;e335232f7491:34461 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dc68a781e2de43aba203a3ab9449cad8 2024-11-24T16:53:09,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:53:09,729 INFO [RS:0;e335232f7491:39941 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T16:53:09,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39941-0x1014ae4dd780001, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:53:09,729 INFO [RS:0;e335232f7491:39941 {}] regionserver.HRegionServer(1031): Exiting; stopping=e335232f7491,39941,1732467159848; zookeeper connection closed. 2024-11-24T16:53:09,729 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@176f3ea6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@176f3ea6 2024-11-24T16:53:09,729 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T16:53:09,747 DEBUG [M:0;e335232f7491:34461 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9cd6e4d1d40e4473a5df5e3ac66b9058 is 69, key is e335232f7491,39941,1732467159848/rs:state/1732467160093/Put/seqid=0 2024-11-24T16:53:09,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36919 is added to blk_1073741850_1035 (size=5156) 2024-11-24T16:53:09,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46767 is added to blk_1073741850_1035 (size=5156) 2024-11-24T16:53:09,751 INFO [M:0;e335232f7491:34461 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9cd6e4d1d40e4473a5df5e3ac66b9058 2024-11-24T16:53:09,770 DEBUG [M:0;e335232f7491:34461 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7f9d459321de4581bf7cf3ceda2ea924 is 52, key is load_balancer_on/state:d/1732467160778/Put/seqid=0 
2024-11-24T16:53:09,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36919 is added to blk_1073741851_1036 (size=5056) 2024-11-24T16:53:09,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46767 is added to blk_1073741851_1036 (size=5056) 2024-11-24T16:53:09,776 INFO [M:0;e335232f7491:34461 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7f9d459321de4581bf7cf3ceda2ea924 2024-11-24T16:53:09,781 DEBUG [M:0;e335232f7491:34461 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3bc267e249014e7695a5d2c52707202e as hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3bc267e249014e7695a5d2c52707202e 2024-11-24T16:53:09,782 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T16:53:09,786 INFO [M:0;e335232f7491:34461 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3bc267e249014e7695a5d2c52707202e, entries=8, sequenceid=56, filesize=5.5 K 2024-11-24T16:53:09,787 DEBUG [M:0;e335232f7491:34461 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dc68a781e2de43aba203a3ab9449cad8 as hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/dc68a781e2de43aba203a3ab9449cad8 2024-11-24T16:53:09,792 INFO [M:0;e335232f7491:34461 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/dc68a781e2de43aba203a3ab9449cad8, entries=6, sequenceid=56, filesize=6.0 K 2024-11-24T16:53:09,793 DEBUG [M:0;e335232f7491:34461 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9cd6e4d1d40e4473a5df5e3ac66b9058 as hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9cd6e4d1d40e4473a5df5e3ac66b9058 2024-11-24T16:53:09,797 INFO [M:0;e335232f7491:34461 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9cd6e4d1d40e4473a5df5e3ac66b9058, entries=1, sequenceid=56, filesize=5.0 K 2024-11-24T16:53:09,798 DEBUG [M:0;e335232f7491:34461 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7f9d459321de4581bf7cf3ceda2ea924 as hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7f9d459321de4581bf7cf3ceda2ea924 2024-11-24T16:53:09,802 INFO [M:0;e335232f7491:34461 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7f9d459321de4581bf7cf3ceda2ea924, entries=1, sequenceid=56, filesize=4.9 K 2024-11-24T16:53:09,803 INFO [M:0;e335232f7491:34461 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=56, compaction requested=false 2024-11-24T16:53:09,804 INFO [M:0;e335232f7491:34461 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:53:09,804 DEBUG [M:0;e335232f7491:34461 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732467189647Disabling compacts and flushes for region at 1732467189647Disabling writes for close at 1732467189647Obtaining lock to block concurrent updates at 1732467189647Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732467189647Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732467189648 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732467189671 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732467189671Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732467189689 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732467189689Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732467189704 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732467189720 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732467189720Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732467189731 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732467189746 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732467189746Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732467189756 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732467189769 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732467189769Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@253c5c9b: reopening flushed file at 1732467189781 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f9ecbcc: reopening flushed file at 1732467189786 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3477f416: reopening flushed file at 1732467189792 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f292883: reopening flushed file at 1732467189797 (+5 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=56, compaction requested=false at 1732467189803 (+6 ms)Writing region close event to WAL at 1732467189804 (+1 ms)Closed at 1732467189804 2024-11-24T16:53:09,805 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:09,805 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:09,805 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:09,805 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:09,805 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:09,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36919 is added to blk_1073741847_1031 (size=757) 2024-11-24T16:53:09,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46767 is added to blk_1073741847_1031 (size=757) 2024-11-24T16:53:09,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:09,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:10,766 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:10,766 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:10,778 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:10,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:10,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:10,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:10,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:10,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:10,782 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:10,782 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:10,782 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:10,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:10,789 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:10,789 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:10,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:10,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:11,292 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T16:53:11,293 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:11,294 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:11,294 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:11,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:11,308 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:11,308 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:11,309 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:11,309 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:11,309 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:11,310 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:11,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:11,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:11,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:11,315 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:11,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:11,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:12,634 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1014: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-24T16:53:12,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:12,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:13,664 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/WALs/e335232f7491,34461,1732467159799/e335232f7491%2C34461%2C1732467159799.1732467159936 after 4001ms 2024-11-24T16:53:13,665 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/WALs/e335232f7491,34461,1732467159799/e335232f7491%2C34461%2C1732467159799.1732467159936 to hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/oldWALs/e335232f7491%2C34461%2C1732467159799.1732467159936 2024-11-24T16:53:13,667 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/MasterData/oldWALs/e335232f7491%2C34461%2C1732467159799.1732467159936 to hdfs://localhost:34209/user/jenkins/test-data/e85e8d82-9716-9a03-1e0d-44f4d2b837ef/oldWALs/e335232f7491%2C34461%2C1732467159799.1732467159936$masterlocalwal$ 2024-11-24T16:53:13,668 INFO [M:0;e335232f7491:34461 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T16:53:13,668 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
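The repeated "Failed invocation" warnings above show RecoverLeaseFSUtils calling DistributedFileSystem.isFileClosed reflectively while the WAL writer is being closed; because the DFSClient has already been shut down, the reflective call surfaces as an InvocationTargetException whose cause is the real IOException ("Filesystem closed"), and the utility simply retries until the lease is eventually recovered ("Recovered lease, attempt=1 ... after 4001ms"). As a minimal, self-contained illustration of why the log prints "InvocationTargetException: null" with the real error under "Caused by", here is a hedged sketch using only the JDK (FileSystemStub is a hypothetical stand-in, not the HBase or HDFS code):

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveProbeDemo {

        /** Hypothetical stand-in for a filesystem whose client may already be closed. */
        static class FileSystemStub {
            private final boolean closed;

            FileSystemStub(boolean closed) {
                this.closed = closed;
            }

            /** Mirrors the shape of an isFileClosed(path) call: fails once the client is closed. */
            public boolean isFileClosed(String path) throws IOException {
                if (closed) {
                    throw new IOException("Filesystem closed");
                }
                return true;
            }
        }

        public static void main(String[] args) throws Exception {
            FileSystemStub fs = new FileSystemStub(true);
            // Look the method up reflectively, the way a capability probe would for an
            // optional API that may not exist on every filesystem implementation.
            Method isFileClosed = fs.getClass().getMethod("isFileClosed", String.class);
            try {
                isFileClosed.invoke(fs, "/some/wal/file");
            } catch (InvocationTargetException e) {
                // Method.invoke wraps whatever the target threw; the wrapper itself has a
                // null message, which is why the log shows "InvocationTargetException: null".
                System.out.println("wrapper message: " + e.getMessage()); // null
                System.out.println("real cause: " + e.getCause());        // java.io.IOException: Filesystem closed
            }
        }
    }

Method.invoke always wraps whatever the target throws, so a caller that wants the original IOException has to unwrap getCause(); the retry behaviour visible in the log above is a separate concern and is not shown here.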
2024-11-24T16:53:13,668 INFO [M:0;e335232f7491:34461 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34461 2024-11-24T16:53:13,668 INFO [M:0;e335232f7491:34461 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T16:53:13,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:53:13,770 INFO [M:0;e335232f7491:34461 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T16:53:13,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34461-0x1014ae4dd780000, quorum=127.0.0.1:62232, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:53:13,772 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1ed6e4b8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:53:13,772 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@929fb8a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:53:13,772 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:53:13,773 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3172a7c6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:53:13,773 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@652ca842{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/hadoop.log.dir/,STOPPED} 2024-11-24T16:53:13,774 WARN [BP-1845562487-172.17.0.2-1732467159106 heartbeating to localhost/127.0.0.1:34209 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:53:13,774 WARN [BP-1845562487-172.17.0.2-1732467159106 heartbeating to localhost/127.0.0.1:34209 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1845562487-172.17.0.2-1732467159106 (Datanode Uuid fc58d58b-ec27-4140-8039-694920e754c0) service to localhost/127.0.0.1:34209 2024-11-24T16:53:13,774 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T16:53:13,774 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:53:13,775 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/data/data3/current/BP-1845562487-172.17.0.2-1732467159106 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:53:13,775 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/data/data4/current/BP-1845562487-172.17.0.2-1732467159106 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:53:13,775 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:53:13,777 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5036ecf5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:53:13,777 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2647e756{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:53:13,777 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:53:13,777 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@290ac13e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:53:13,777 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67d70e61{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/hadoop.log.dir/,STOPPED} 2024-11-24T16:53:13,778 WARN [BP-1845562487-172.17.0.2-1732467159106 heartbeating to localhost/127.0.0.1:34209 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:53:13,778 WARN [BP-1845562487-172.17.0.2-1732467159106 heartbeating to localhost/127.0.0.1:34209 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1845562487-172.17.0.2-1732467159106 (Datanode Uuid 646f4b75-dde7-4682-99de-65bd4c48f64b) service to localhost/127.0.0.1:34209 2024-11-24T16:53:13,778 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
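The shutdown lines above ("Command processor encountered interrupt and exit." followed by "Ending command processor service ...") reflect a standard pattern: a dedicated thread drains a queue of commands and treats interruption as its shutdown signal. Below is a generic, hypothetical sketch of that pattern in plain Java; it is not the Hadoop BPServiceActor implementation, just the queue-draining idea those log lines point at:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.TimeUnit;

    public class CommandProcessorDemo {

        public static void main(String[] args) throws InterruptedException {
            BlockingQueue<String> commands = new LinkedBlockingQueue<>();

            Thread processor = new Thread(() -> {
                try {
                    while (!Thread.currentThread().isInterrupted()) {
                        // Blocks until a command arrives; an interrupt while blocked throws
                        // InterruptedException, which we treat as the shutdown signal.
                        String command = commands.take();
                        System.out.println("processing " + command);
                    }
                } catch (InterruptedException e) {
                    // Analogous in spirit to "Command processor encountered interrupt and exit."
                    System.out.println("command processor interrupted, exiting");
                    Thread.currentThread().interrupt(); // restore the interrupt flag
                } finally {
                    System.out.println("ending command processor service");
                }
            }, "Command processor");

            processor.start();
            commands.put("invalidate blk_123");
            TimeUnit.MILLISECONDS.sleep(100);

            processor.interrupt(); // shutdown, as when the datanode stops its service threads
            processor.join();
        }
    }

Restoring the interrupt flag in the catch block keeps the shutdown signal visible to any code further up the stack.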
2024-11-24T16:53:13,778 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:53:13,779 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/data/data1/current/BP-1845562487-172.17.0.2-1732467159106 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:53:13,779 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/cluster_587be507-d722-02ec-c8ce-7696bca9be9c/data/data2/current/BP-1845562487-172.17.0.2-1732467159106 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:53:13,779 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:53:13,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e54696a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T16:53:13,785 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@228e200c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:53:13,785 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:53:13,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65e5bfc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:53:13,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@affd6eb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/hadoop.log.dir/,STOPPED} 2024-11-24T16:53:13,791 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T16:53:13,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T16:53:13,816 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34209 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34209 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34209 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34209 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34209 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:34209 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34209 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34209 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=156 (was 162), ProcessCount=11 (was 11), AvailableMemoryMB=6238 (was 5925) - AvailableMemoryMB LEAK? 
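The ResourceChecker summary above compares thread counts, open file descriptors, system load and available memory before and after the test and flags suspicious growth ("Thread LEAK?", "OpenFileDescriptor LEAK?"), dumping the stacks of threads still alive. A rough sketch of the same before/after idea using only standard JMX beans follows; it is not the HBase ResourceChecker, and the open-file-descriptor count relies on the com.sun.management extension, which is an assumption and only available on some platforms and JDKs:

    import java.lang.management.ManagementFactory;
    import java.lang.management.OperatingSystemMXBean;
    import java.lang.management.ThreadMXBean;

    public class ResourceSnapshotDemo {

        record Snapshot(int threads, long openFds) {}

        static Snapshot snapshot() {
            ThreadMXBean threads = ManagementFactory.getThreadMXBean();
            long fds = -1;
            OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
            if (os instanceof com.sun.management.UnixOperatingSystemMXBean unix) {
                fds = unix.getOpenFileDescriptorCount(); // platform-dependent extension
            }
            return new Snapshot(threads.getThreadCount(), fds);
        }

        public static void main(String[] args) throws Exception {
            Snapshot before = snapshot();

            // ... the test body would run here; a short-lived thread stands in for it.
            Thread t = new Thread(() -> {}, "test-worker");
            t.start();
            t.join();

            Snapshot after = snapshot();
            System.out.printf("Thread=%d (was %d)%n", after.threads(), before.threads());
            System.out.printf("OpenFileDescriptor=%d (was %d)%n", after.openFds(), before.openFds());
            if (after.threads() > before.threads()) {
                System.out.println("- Thread LEAK? -");
            }
        }
    }
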
- 2024-11-24T16:53:13,824 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=156, ProcessCount=11, AvailableMemoryMB=6238 2024-11-24T16:53:13,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T16:53:13,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/hadoop.log.dir so I do NOT create it in target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d 2024-11-24T16:53:13,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9778ece6-5dba-f683-f85d-794c2439525c/hadoop.tmp.dir so I do NOT create it in target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d 2024-11-24T16:53:13,824 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/cluster_9eecae05-0da2-91ae-2d19-c33f87ecf12d, deleteOnExit=true 2024-11-24T16:53:13,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T16:53:13,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/test.cache.data in system properties and HBase conf 2024-11-24T16:53:13,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T16:53:13,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/hadoop.log.dir in system properties and HBase conf 2024-11-24T16:53:13,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T16:53:13,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T16:53:13,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T16:53:13,825 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T16:53:13,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T16:53:13,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T16:53:13,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T16:53:13,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T16:53:13,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T16:53:13,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T16:53:13,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T16:53:13,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T16:53:13,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T16:53:13,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/nfs.dump.dir in system properties and HBase conf 2024-11-24T16:53:13,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/java.io.tmpdir in system properties and HBase conf 2024-11-24T16:53:13,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T16:53:13,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T16:53:13,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T16:53:13,841 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T16:53:13,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:13,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:13,913 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:53:13,917 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:53:13,918 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:53:13,918 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:53:13,918 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T16:53:13,919 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:53:13,919 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@701842fe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:53:13,920 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40e0483f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:53:14,033 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b4e4fbe{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/java.io.tmpdir/jetty-localhost-42417-hadoop-hdfs-3_4_1-tests_jar-_-any-8791080293905110787/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T16:53:14,034 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@18baee58{HTTP/1.1, (http/1.1)}{localhost:42417} 2024-11-24T16:53:14,034 INFO [Time-limited test {}] server.Server(415): Started @184996ms 2024-11-24T16:53:14,048 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T16:53:14,105 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:53:14,108 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:53:14,109 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:53:14,109 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:53:14,109 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:53:14,109 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e7fac47{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:53:14,109 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59a5f2c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:53:14,223 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b24fbcd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/java.io.tmpdir/jetty-localhost-40343-hadoop-hdfs-3_4_1-tests_jar-_-any-4306258231210704271/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:53:14,224 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21bca0b2{HTTP/1.1, (http/1.1)}{localhost:40343} 2024-11-24T16:53:14,224 INFO [Time-limited test {}] server.Server(415): Started @185186ms 2024-11-24T16:53:14,225 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T16:53:14,253 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:53:14,256 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:53:14,257 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:53:14,257 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:53:14,257 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:53:14,257 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b5fa12b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:53:14,257 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cac9c50{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:53:14,301 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T16:53:14,301 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T16:53:14,301 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T16:53:14,301 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T16:53:14,318 WARN [Thread-1639 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/cluster_9eecae05-0da2-91ae-2d19-c33f87ecf12d/data/data1/current/BP-956033605-172.17.0.2-1732467193858/current, will proceed with Du for space computation calculation, 2024-11-24T16:53:14,318 WARN [Thread-1640 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/cluster_9eecae05-0da2-91ae-2d19-c33f87ecf12d/data/data2/current/BP-956033605-172.17.0.2-1732467193858/current, will proceed with Du for space computation calculation, 2024-11-24T16:53:14,334 WARN [Thread-1618 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T16:53:14,337 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa7b3980415a331ec with lease ID 0xc03a7e2d9e07b7c1: Processing first storage report for DS-d25e1979-eaf7-4458-b842-c62c5927b2d5 from datanode DatanodeRegistration(127.0.0.1:43875, datanodeUuid=fc2d3e14-97f7-40b5-afe0-0d1f0936fc25, infoPort=46273, infoSecurePort=0, ipcPort=38241, storageInfo=lv=-57;cid=testClusterID;nsid=1619398937;c=1732467193858) 2024-11-24T16:53:14,337 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa7b3980415a331ec with lease ID 0xc03a7e2d9e07b7c1: from storage DS-d25e1979-eaf7-4458-b842-c62c5927b2d5 node DatanodeRegistration(127.0.0.1:43875, datanodeUuid=fc2d3e14-97f7-40b5-afe0-0d1f0936fc25, infoPort=46273, infoSecurePort=0, ipcPort=38241, storageInfo=lv=-57;cid=testClusterID;nsid=1619398937;c=1732467193858), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:53:14,337 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa7b3980415a331ec with lease ID 0xc03a7e2d9e07b7c1: Processing first storage report for DS-121c753d-2ef7-4fe2-814d-2165c19602a0 from datanode DatanodeRegistration(127.0.0.1:43875, datanodeUuid=fc2d3e14-97f7-40b5-afe0-0d1f0936fc25, infoPort=46273, infoSecurePort=0, ipcPort=38241, storageInfo=lv=-57;cid=testClusterID;nsid=1619398937;c=1732467193858) 2024-11-24T16:53:14,337 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa7b3980415a331ec with lease ID 0xc03a7e2d9e07b7c1: from storage DS-121c753d-2ef7-4fe2-814d-2165c19602a0 node DatanodeRegistration(127.0.0.1:43875, datanodeUuid=fc2d3e14-97f7-40b5-afe0-0d1f0936fc25, infoPort=46273, infoSecurePort=0, ipcPort=38241, storageInfo=lv=-57;cid=testClusterID;nsid=1619398937;c=1732467193858), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:53:14,376 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@72ef9fa2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/java.io.tmpdir/jetty-localhost-42291-hadoop-hdfs-3_4_1-tests_jar-_-any-14167195885028360486/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:53:14,377 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@618f0457{HTTP/1.1, (http/1.1)}{localhost:42291} 2024-11-24T16:53:14,377 INFO [Time-limited test {}] server.Server(415): Started @185339ms 2024-11-24T16:53:14,378 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T16:53:14,467 WARN [Thread-1665 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/cluster_9eecae05-0da2-91ae-2d19-c33f87ecf12d/data/data3/current/BP-956033605-172.17.0.2-1732467193858/current, will proceed with Du for space computation calculation, 2024-11-24T16:53:14,467 WARN [Thread-1666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/cluster_9eecae05-0da2-91ae-2d19-c33f87ecf12d/data/data4/current/BP-956033605-172.17.0.2-1732467193858/current, will proceed with Du for space computation calculation, 2024-11-24T16:53:14,483 WARN [Thread-1654 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T16:53:14,485 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9788aa229cb0da14 with lease ID 0xc03a7e2d9e07b7c2: Processing first storage report for DS-bcbdaa5b-4b4b-4350-8ac7-b9bce3385684 from datanode DatanodeRegistration(127.0.0.1:34143, datanodeUuid=71e6b226-7ae8-4198-a933-cde3ac6c257d, infoPort=40923, infoSecurePort=0, ipcPort=42061, storageInfo=lv=-57;cid=testClusterID;nsid=1619398937;c=1732467193858) 2024-11-24T16:53:14,485 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9788aa229cb0da14 with lease ID 0xc03a7e2d9e07b7c2: from storage DS-bcbdaa5b-4b4b-4350-8ac7-b9bce3385684 node DatanodeRegistration(127.0.0.1:34143, datanodeUuid=71e6b226-7ae8-4198-a933-cde3ac6c257d, infoPort=40923, infoSecurePort=0, ipcPort=42061, storageInfo=lv=-57;cid=testClusterID;nsid=1619398937;c=1732467193858), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:53:14,485 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9788aa229cb0da14 with lease ID 0xc03a7e2d9e07b7c2: Processing first storage report for DS-c5c05721-5455-4d1c-ada7-fce102189129 from datanode DatanodeRegistration(127.0.0.1:34143, datanodeUuid=71e6b226-7ae8-4198-a933-cde3ac6c257d, infoPort=40923, infoSecurePort=0, ipcPort=42061, storageInfo=lv=-57;cid=testClusterID;nsid=1619398937;c=1732467193858) 2024-11-24T16:53:14,485 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9788aa229cb0da14 with lease ID 0xc03a7e2d9e07b7c2: from storage DS-c5c05721-5455-4d1c-ada7-fce102189129 node DatanodeRegistration(127.0.0.1:34143, datanodeUuid=71e6b226-7ae8-4198-a933-cde3ac6c257d, infoPort=40923, infoSecurePort=0, ipcPort=42061, storageInfo=lv=-57;cid=testClusterID;nsid=1619398937;c=1732467193858), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:53:14,500 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d 2024-11-24T16:53:14,502 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/cluster_9eecae05-0da2-91ae-2d19-c33f87ecf12d/zookeeper_0, clientPort=50115, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/cluster_9eecae05-0da2-91ae-2d19-c33f87ecf12d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/cluster_9eecae05-0da2-91ae-2d19-c33f87ecf12d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T16:53:14,503 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50115 2024-11-24T16:53:14,503 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:53:14,504 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:53:14,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741825_1001 (size=7) 2024-11-24T16:53:14,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741825_1001 (size=7) 2024-11-24T16:53:14,514 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5 with version=8 2024-11-24T16:53:14,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/hbase-staging 2024-11-24T16:53:14,517 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e335232f7491:0 server-side Connection retries=45 2024-11-24T16:53:14,517 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:53:14,517 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T16:53:14,517 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T16:53:14,517 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:53:14,517 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T16:53:14,517 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T16:53:14,517 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T16:53:14,518 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44489 2024-11-24T16:53:14,520 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44489 connecting to ZooKeeper ensemble=127.0.0.1:50115 2024-11-24T16:53:14,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:444890x0, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T16:53:14,526 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44489-0x1014ae565150000 connected 2024-11-24T16:53:14,546 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:53:14,547 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:53:14,549 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:53:14,549 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5, hbase.cluster.distributed=false 2024-11-24T16:53:14,551 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T16:53:14,551 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44489 2024-11-24T16:53:14,551 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44489 2024-11-24T16:53:14,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44489 2024-11-24T16:53:14,553 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44489 2024-11-24T16:53:14,556 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44489 2024-11-24T16:53:14,571 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e335232f7491:0 server-side Connection retries=45 2024-11-24T16:53:14,571 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:53:14,571 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T16:53:14,571 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T16:53:14,571 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:53:14,571 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T16:53:14,571 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T16:53:14,571 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T16:53:14,572 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37349 2024-11-24T16:53:14,573 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37349 connecting to ZooKeeper ensemble=127.0.0.1:50115 2024-11-24T16:53:14,573 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:53:14,575 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:53:14,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:373490x0, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T16:53:14,579 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:53:14,579 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37349-0x1014ae565150001 connected 2024-11-24T16:53:14,579 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T16:53:14,581 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T16:53:14,581 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T16:53:14,582 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T16:53:14,586 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37349 2024-11-24T16:53:14,586 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37349 2024-11-24T16:53:14,588 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37349 2024-11-24T16:53:14,589 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37349 2024-11-24T16:53:14,589 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37349 
2024-11-24T16:53:14,600 DEBUG [M:0;e335232f7491:44489 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e335232f7491:44489 2024-11-24T16:53:14,600 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e335232f7491,44489,1732467194516 2024-11-24T16:53:14,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:53:14,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:53:14,609 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e335232f7491,44489,1732467194516 2024-11-24T16:53:14,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T16:53:14,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:53:14,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:53:14,611 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T16:53:14,611 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e335232f7491,44489,1732467194516 from backup master directory 2024-11-24T16:53:14,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e335232f7491,44489,1732467194516 2024-11-24T16:53:14,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:53:14,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:53:14,612 WARN [master/e335232f7491:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T16:53:14,612 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e335232f7491,44489,1732467194516 2024-11-24T16:53:14,617 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/hbase.id] with ID: effb7117-5108-48b9-9064-3beba4126c09 2024-11-24T16:53:14,617 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/.tmp/hbase.id 2024-11-24T16:53:14,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741826_1002 (size=42) 2024-11-24T16:53:14,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741826_1002 (size=42) 2024-11-24T16:53:14,623 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/.tmp/hbase.id]:[hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/hbase.id] 2024-11-24T16:53:14,633 INFO [master/e335232f7491:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:53:14,633 INFO [master/e335232f7491:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T16:53:14,634 INFO [master/e335232f7491:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-24T16:53:14,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:53:14,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:53:14,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741827_1003 (size=196) 2024-11-24T16:53:14,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741827_1003 (size=196) 2024-11-24T16:53:14,643 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T16:53:14,644 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T16:53:14,644 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:53:14,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741828_1004 (size=1189) 2024-11-24T16:53:14,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741828_1004 (size=1189) 2024-11-24T16:53:14,654 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store 2024-11-24T16:53:14,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741829_1005 (size=34) 2024-11-24T16:53:14,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741829_1005 (size=34) 2024-11-24T16:53:14,660 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:53:14,660 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T16:53:14,660 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:53:14,660 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:53:14,660 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T16:53:14,660 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:53:14,661 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T16:53:14,661 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732467194660Disabling compacts and flushes for region at 1732467194660Disabling writes for close at 1732467194660Writing region close event to WAL at 1732467194660Closed at 1732467194660 2024-11-24T16:53:14,661 WARN [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/.initializing 2024-11-24T16:53:14,661 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/WALs/e335232f7491,44489,1732467194516 2024-11-24T16:53:14,663 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C44489%2C1732467194516, suffix=, logDir=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/WALs/e335232f7491,44489,1732467194516, archiveDir=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/oldWALs, maxLogs=10 2024-11-24T16:53:14,664 INFO [master/e335232f7491:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C44489%2C1732467194516.1732467194664 2024-11-24T16:53:14,668 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/WALs/e335232f7491,44489,1732467194516/e335232f7491%2C44489%2C1732467194516.1732467194664 2024-11-24T16:53:14,669 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46273:46273),(127.0.0.1/127.0.0.1:40923:40923)] 2024-11-24T16:53:14,669 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:53:14,670 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:53:14,670 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:53:14,670 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:53:14,671 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:53:14,672 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T16:53:14,672 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:53:14,673 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:53:14,673 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:53:14,674 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T16:53:14,674 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:53:14,674 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:53:14,674 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:53:14,675 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T16:53:14,675 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:53:14,676 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:53:14,676 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:53:14,677 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T16:53:14,677 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:53:14,677 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:53:14,677 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:53:14,678 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:53:14,678 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:53:14,679 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:53:14,679 DEBUG [master/e335232f7491:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:53:14,680 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T16:53:14,681 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:53:14,683 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:53:14,684 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=732839, jitterRate=-0.0681481808423996}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T16:53:14,684 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732467194670Initializing all the Stores at 1732467194670Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467194670Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467194671 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467194671Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467194671Cleaning up temporary data from old regions at 1732467194680 (+9 ms)Region opened successfully at 1732467194684 (+4 ms) 2024-11-24T16:53:14,685 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T16:53:14,688 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f9c6b2b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e335232f7491/172.17.0.2:0 2024-11-24T16:53:14,689 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T16:53:14,689 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T16:53:14,689 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T16:53:14,689 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T16:53:14,690 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T16:53:14,690 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T16:53:14,690 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T16:53:14,692 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T16:53:14,693 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T16:53:14,696 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T16:53:14,696 INFO [master/e335232f7491:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T16:53:14,697 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T16:53:14,697 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T16:53:14,698 INFO [master/e335232f7491:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T16:53:14,699 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T16:53:14,700 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T16:53:14,700 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T16:53:14,701 DEBUG 
[master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T16:53:14,703 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T16:53:14,705 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T16:53:14,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T16:53:14,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T16:53:14,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:53:14,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:53:14,707 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e335232f7491,44489,1732467194516, sessionid=0x1014ae565150000, setting cluster-up flag (Was=false) 2024-11-24T16:53:14,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:53:14,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:53:14,715 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T16:53:14,716 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e335232f7491,44489,1732467194516 2024-11-24T16:53:14,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:53:14,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:53:14,724 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T16:53:14,725 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e335232f7491,44489,1732467194516 2024-11-24T16:53:14,727 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T16:53:14,728 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T16:53:14,729 INFO [master/e335232f7491:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T16:53:14,729 INFO [master/e335232f7491:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T16:53:14,729 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e335232f7491,44489,1732467194516 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T16:53:14,730 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:53:14,730 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:53:14,730 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:53:14,730 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:53:14,730 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e335232f7491:0, corePoolSize=10, maxPoolSize=10 2024-11-24T16:53:14,730 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:53:14,730 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e335232f7491:0, corePoolSize=2, maxPoolSize=2 2024-11-24T16:53:14,730 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e335232f7491:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T16:53:14,733 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732467224733 2024-11-24T16:53:14,734 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T16:53:14,734 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T16:53:14,734 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T16:53:14,734 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:53:14,734 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T16:53:14,734 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T16:53:14,734 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T16:53:14,734 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T16:53:14,734 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T16:53:14,734 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T16:53:14,734 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T16:53:14,734 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T16:53:14,735 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T16:53:14,735 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T16:53:14,735 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467194735,5,FailOnTimeoutGroup] 2024-11-24T16:53:14,735 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:53:14,735 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467194735,5,FailOnTimeoutGroup] 2024-11-24T16:53:14,735 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-24T16:53:14,735 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T16:53:14,735 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T16:53:14,735 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T16:53:14,735 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T16:53:14,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741831_1007 (size=1321) 2024-11-24T16:53:14,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741831_1007 (size=1321) 2024-11-24T16:53:14,743 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T16:53:14,743 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5 2024-11-24T16:53:14,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741832_1008 (size=32) 2024-11-24T16:53:14,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741832_1008 (size=32) 2024-11-24T16:53:14,754 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:53:14,755 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T16:53:14,756 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T16:53:14,756 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:53:14,757 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:53:14,757 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-11-24T16:53:14,758 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T16:53:14,758 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:53:14,759 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:53:14,759 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T16:53:14,760 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T16:53:14,760 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:53:14,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:53:14,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T16:53:14,762 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T16:53:14,762 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:53:14,762 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:53:14,762 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T16:53:14,763 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740 2024-11-24T16:53:14,763 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740 2024-11-24T16:53:14,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T16:53:14,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T16:53:14,765 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-24T16:53:14,766 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T16:53:14,768 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:53:14,769 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=730361, jitterRate=-0.07129837572574615}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T16:53:14,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732467194754Initializing all the Stores at 1732467194755 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467194755Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467194755Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467194755Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467194755Cleaning up temporary data from old regions at 1732467194765 (+10 ms)Region opened successfully at 1732467194770 (+5 ms) 2024-11-24T16:53:14,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T16:53:14,770 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T16:53:14,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T16:53:14,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T16:53:14,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T16:53:14,770 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T16:53:14,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732467194770Disabling compacts and flushes for region at 1732467194770Disabling writes for close at 1732467194770Writing region 
close event to WAL at 1732467194770Closed at 1732467194770 2024-11-24T16:53:14,772 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:53:14,772 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T16:53:14,772 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T16:53:14,774 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T16:53:14,775 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T16:53:14,790 INFO [RS:0;e335232f7491:37349 {}] regionserver.HRegionServer(746): ClusterId : effb7117-5108-48b9-9064-3beba4126c09 2024-11-24T16:53:14,790 DEBUG [RS:0;e335232f7491:37349 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T16:53:14,797 DEBUG [RS:0;e335232f7491:37349 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T16:53:14,797 DEBUG [RS:0;e335232f7491:37349 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T16:53:14,799 DEBUG [RS:0;e335232f7491:37349 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T16:53:14,800 DEBUG [RS:0;e335232f7491:37349 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32a9657a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e335232f7491/172.17.0.2:0 2024-11-24T16:53:14,811 DEBUG [RS:0;e335232f7491:37349 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e335232f7491:37349 2024-11-24T16:53:14,812 INFO [RS:0;e335232f7491:37349 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T16:53:14,812 INFO [RS:0;e335232f7491:37349 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T16:53:14,812 DEBUG [RS:0;e335232f7491:37349 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T16:53:14,812 INFO [RS:0;e335232f7491:37349 {}] regionserver.HRegionServer(2659): reportForDuty to master=e335232f7491,44489,1732467194516 with port=37349, startcode=1732467194570 2024-11-24T16:53:14,812 DEBUG [RS:0;e335232f7491:37349 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T16:53:14,814 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53465, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T16:53:14,815 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44489 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e335232f7491,37349,1732467194570 2024-11-24T16:53:14,815 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44489 {}] master.ServerManager(517): Registering regionserver=e335232f7491,37349,1732467194570 2024-11-24T16:53:14,817 DEBUG [RS:0;e335232f7491:37349 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5 2024-11-24T16:53:14,817 DEBUG [RS:0;e335232f7491:37349 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34699 2024-11-24T16:53:14,817 DEBUG [RS:0;e335232f7491:37349 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T16:53:14,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T16:53:14,819 DEBUG [RS:0;e335232f7491:37349 {}] zookeeper.ZKUtil(111): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e335232f7491,37349,1732467194570 2024-11-24T16:53:14,820 WARN [RS:0;e335232f7491:37349 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T16:53:14,820 INFO [RS:0;e335232f7491:37349 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:53:14,820 DEBUG [RS:0;e335232f7491:37349 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/WALs/e335232f7491,37349,1732467194570 2024-11-24T16:53:14,821 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e335232f7491,37349,1732467194570] 2024-11-24T16:53:14,826 INFO [RS:0;e335232f7491:37349 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T16:53:14,827 INFO [RS:0;e335232f7491:37349 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T16:53:14,827 INFO [RS:0;e335232f7491:37349 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T16:53:14,828 INFO [RS:0;e335232f7491:37349 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-24T16:53:14,828 INFO [RS:0;e335232f7491:37349 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T16:53:14,828 INFO [RS:0;e335232f7491:37349 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T16:53:14,829 INFO [RS:0;e335232f7491:37349 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T16:53:14,829 DEBUG [RS:0;e335232f7491:37349 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:53:14,829 DEBUG [RS:0;e335232f7491:37349 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:53:14,829 DEBUG [RS:0;e335232f7491:37349 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:53:14,829 DEBUG [RS:0;e335232f7491:37349 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:53:14,829 DEBUG [RS:0;e335232f7491:37349 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:53:14,829 DEBUG [RS:0;e335232f7491:37349 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e335232f7491:0, corePoolSize=2, maxPoolSize=2 2024-11-24T16:53:14,829 DEBUG [RS:0;e335232f7491:37349 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:53:14,829 DEBUG [RS:0;e335232f7491:37349 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:53:14,829 DEBUG [RS:0;e335232f7491:37349 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:53:14,829 DEBUG [RS:0;e335232f7491:37349 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:53:14,829 DEBUG [RS:0;e335232f7491:37349 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:53:14,829 DEBUG [RS:0;e335232f7491:37349 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:53:14,829 DEBUG [RS:0;e335232f7491:37349 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e335232f7491:0, corePoolSize=3, maxPoolSize=3 2024-11-24T16:53:14,829 DEBUG [RS:0;e335232f7491:37349 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0, corePoolSize=3, maxPoolSize=3 2024-11-24T16:53:14,830 INFO [RS:0;e335232f7491:37349 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T16:53:14,830 INFO [RS:0;e335232f7491:37349 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T16:53:14,830 INFO [RS:0;e335232f7491:37349 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:53:14,830 INFO [RS:0;e335232f7491:37349 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T16:53:14,830 INFO [RS:0;e335232f7491:37349 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T16:53:14,830 INFO [RS:0;e335232f7491:37349 {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,37349,1732467194570-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T16:53:14,845 INFO [RS:0;e335232f7491:37349 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T16:53:14,845 INFO [RS:0;e335232f7491:37349 {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,37349,1732467194570-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:53:14,845 INFO [RS:0;e335232f7491:37349 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:53:14,845 INFO [RS:0;e335232f7491:37349 {}] regionserver.Replication(171): e335232f7491,37349,1732467194570 started 2024-11-24T16:53:14,859 INFO [RS:0;e335232f7491:37349 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:53:14,859 INFO [RS:0;e335232f7491:37349 {}] regionserver.HRegionServer(1482): Serving as e335232f7491,37349,1732467194570, RpcServer on e335232f7491/172.17.0.2:37349, sessionid=0x1014ae565150001 2024-11-24T16:53:14,859 DEBUG [RS:0;e335232f7491:37349 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T16:53:14,859 DEBUG [RS:0;e335232f7491:37349 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e335232f7491,37349,1732467194570 2024-11-24T16:53:14,859 DEBUG [RS:0;e335232f7491:37349 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e335232f7491,37349,1732467194570' 2024-11-24T16:53:14,859 DEBUG [RS:0;e335232f7491:37349 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T16:53:14,860 DEBUG [RS:0;e335232f7491:37349 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T16:53:14,860 DEBUG [RS:0;e335232f7491:37349 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T16:53:14,860 DEBUG [RS:0;e335232f7491:37349 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T16:53:14,860 DEBUG [RS:0;e335232f7491:37349 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e335232f7491,37349,1732467194570 2024-11-24T16:53:14,860 DEBUG [RS:0;e335232f7491:37349 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e335232f7491,37349,1732467194570' 2024-11-24T16:53:14,860 DEBUG [RS:0;e335232f7491:37349 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T16:53:14,861 DEBUG 
[RS:0;e335232f7491:37349 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-24T16:53:14,861 DEBUG [RS:0;e335232f7491:37349 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-24T16:53:14,861 INFO [RS:0;e335232f7491:37349 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-24T16:53:14,861 INFO [RS:0;e335232f7491:37349 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-24T16:53:14,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T16:53:14,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T16:53:14,925 WARN [e335232f7491:44489 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
2024-11-24T16:53:14,963 INFO [RS:0;e335232f7491:37349 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C37349%2C1732467194570, suffix=, logDir=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/WALs/e335232f7491,37349,1732467194570, archiveDir=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/oldWALs, maxLogs=32 2024-11-24T16:53:14,963 INFO [RS:0;e335232f7491:37349 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C37349%2C1732467194570.1732467194963 2024-11-24T16:53:14,969 INFO [RS:0;e335232f7491:37349 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/WALs/e335232f7491,37349,1732467194570/e335232f7491%2C37349%2C1732467194570.1732467194963 2024-11-24T16:53:14,970 DEBUG [RS:0;e335232f7491:37349 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46273:46273),(127.0.0.1/127.0.0.1:40923:40923)] 2024-11-24T16:53:15,175 DEBUG [e335232f7491:44489 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T16:53:15,176 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e335232f7491,37349,1732467194570 2024-11-24T16:53:15,177 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e335232f7491,37349,1732467194570, state=OPENING 2024-11-24T16:53:15,179 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T16:53:15,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:53:15,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:53:15,183 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T16:53:15,183 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:53:15,183 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:53:15,183 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e335232f7491,37349,1732467194570}] 2024-11-24T16:53:15,336 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T16:53:15,338 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51523, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T16:53:15,342 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T16:53:15,342 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:53:15,344 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C37349%2C1732467194570.meta, suffix=.meta, logDir=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/WALs/e335232f7491,37349,1732467194570, archiveDir=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/oldWALs, maxLogs=32 2024-11-24T16:53:15,344 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C37349%2C1732467194570.meta.1732467195344.meta 2024-11-24T16:53:15,350 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/WALs/e335232f7491,37349,1732467194570/e335232f7491%2C37349%2C1732467194570.meta.1732467195344.meta 2024-11-24T16:53:15,353 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46273:46273),(127.0.0.1/127.0.0.1:40923:40923)] 2024-11-24T16:53:15,357 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:53:15,357 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T16:53:15,357 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T16:53:15,357 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-24T16:53:15,357 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T16:53:15,358 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:53:15,358 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T16:53:15,358 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T16:53:15,360 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T16:53:15,361 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T16:53:15,361 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:53:15,362 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:53:15,362 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T16:53:15,363 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T16:53:15,364 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:53:15,364 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:53:15,364 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T16:53:15,365 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T16:53:15,365 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:53:15,365 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:53:15,365 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T16:53:15,366 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T16:53:15,366 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:53:15,366 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T16:53:15,367 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T16:53:15,367 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740 2024-11-24T16:53:15,369 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740 2024-11-24T16:53:15,370 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T16:53:15,370 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T16:53:15,371 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T16:53:15,373 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T16:53:15,374 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=735046, jitterRate=-0.06534178555011749}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T16:53:15,374 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T16:53:15,375 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732467195358Writing region info on filesystem at 1732467195358Initializing all the Stores at 1732467195359 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467195359Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467195360 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467195360Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467195360Cleaning up temporary data from old regions at 1732467195370 (+10 ms)Running coprocessor post-open hooks at 1732467195374 (+4 ms)Region opened successfully at 1732467195375 (+1 ms) 2024-11-24T16:53:15,376 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732467195336 2024-11-24T16:53:15,379 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T16:53:15,379 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T16:53:15,380 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e335232f7491,37349,1732467194570 2024-11-24T16:53:15,381 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e335232f7491,37349,1732467194570, state=OPEN 2024-11-24T16:53:15,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T16:53:15,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T16:53:15,385 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e335232f7491,37349,1732467194570 2024-11-24T16:53:15,385 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:53:15,385 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:53:15,388 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T16:53:15,388 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e335232f7491,37349,1732467194570 in 202 msec 2024-11-24T16:53:15,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T16:53:15,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 616 msec 2024-11-24T16:53:15,392 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:53:15,392 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T16:53:15,394 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T16:53:15,394 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e335232f7491,37349,1732467194570, seqNum=-1] 2024-11-24T16:53:15,394 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T16:53:15,396 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39797, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T16:53:15,401 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 672 msec 2024-11-24T16:53:15,401 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732467195401, completionTime=-1 2024-11-24T16:53:15,401 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T16:53:15,401 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T16:53:15,403 INFO [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T16:53:15,403 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732467255403 2024-11-24T16:53:15,403 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732467315403 2024-11-24T16:53:15,403 INFO [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T16:53:15,403 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,44489,1732467194516-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:53:15,403 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,44489,1732467194516-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:53:15,403 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,44489,1732467194516-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:53:15,403 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e335232f7491:44489, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T16:53:15,403 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T16:53:15,404 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T16:53:15,405 DEBUG [master/e335232f7491:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T16:53:15,407 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.795sec 2024-11-24T16:53:15,407 INFO [master/e335232f7491:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T16:53:15,407 INFO [master/e335232f7491:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T16:53:15,407 INFO [master/e335232f7491:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T16:53:15,408 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T16:53:15,408 INFO [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T16:53:15,408 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,44489,1732467194516-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T16:53:15,408 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,44489,1732467194516-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T16:53:15,410 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T16:53:15,410 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T16:53:15,410 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,44489,1732467194516-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T16:53:15,491 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34fe4f6c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:53:15,491 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e335232f7491,44489,-1 for getting cluster id 2024-11-24T16:53:15,491 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T16:53:15,493 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'effb7117-5108-48b9-9064-3beba4126c09' 2024-11-24T16:53:15,494 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T16:53:15,495 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "effb7117-5108-48b9-9064-3beba4126c09" 2024-11-24T16:53:15,495 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9f697d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:53:15,495 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e335232f7491,44489,-1] 2024-11-24T16:53:15,495 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T16:53:15,496 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:53:15,497 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49200, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T16:53:15,498 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c7248d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:53:15,498 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T16:53:15,499 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e335232f7491,37349,1732467194570, seqNum=-1] 2024-11-24T16:53:15,500 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T16:53:15,501 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53526, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T16:53:15,503 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e335232f7491,44489,1732467194516 2024-11-24T16:53:15,503 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:53:15,505 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T16:53:15,506 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T16:53:15,506 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is e335232f7491,44489,1732467194516 2024-11-24T16:53:15,507 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@35249950 2024-11-24T16:53:15,507 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T16:53:15,508 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49206, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T16:53:15,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T16:53:15,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-24T16:53:15,509 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T16:53:15,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T16:53:15,511 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T16:53:15,511 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:53:15,512 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-24T16:53:15,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T16:53:15,513 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T16:53:15,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741835_1011 (size=405) 2024-11-24T16:53:15,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741835_1011 (size=405) 2024-11-24T16:53:15,525 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 78b4bc8c7fcd7ac9ef82d2beb46c2ad2, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5 2024-11-24T16:53:15,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741836_1012 (size=88) 2024-11-24T16:53:15,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741836_1012 (size=88) 2024-11-24T16:53:15,540 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:53:15,540 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 78b4bc8c7fcd7ac9ef82d2beb46c2ad2, disabling compactions & flushes 2024-11-24T16:53:15,540 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 2024-11-24T16:53:15,540 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 2024-11-24T16:53:15,540 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. after waiting 0 ms 2024-11-24T16:53:15,540 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 
2024-11-24T16:53:15,540 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 2024-11-24T16:53:15,540 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2: Waiting for close lock at 1732467195540Disabling compacts and flushes for region at 1732467195540Disabling writes for close at 1732467195540Writing region close event to WAL at 1732467195540Closed at 1732467195540 2024-11-24T16:53:15,542 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T16:53:15,542 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732467195542"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732467195542"}]},"ts":"1732467195542"} 2024-11-24T16:53:15,546 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-24T16:53:15,547 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T16:53:15,547 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732467195547"}]},"ts":"1732467195547"} 2024-11-24T16:53:15,549 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-24T16:53:15,550 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=78b4bc8c7fcd7ac9ef82d2beb46c2ad2, ASSIGN}] 2024-11-24T16:53:15,551 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=78b4bc8c7fcd7ac9ef82d2beb46c2ad2, ASSIGN 2024-11-24T16:53:15,553 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=78b4bc8c7fcd7ac9ef82d2beb46c2ad2, ASSIGN; state=OFFLINE, location=e335232f7491,37349,1732467194570; forceNewPlan=false, retain=false 2024-11-24T16:53:15,704 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=78b4bc8c7fcd7ac9ef82d2beb46c2ad2, regionState=OPENING, regionLocation=e335232f7491,37349,1732467194570 2024-11-24T16:53:15,706 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=78b4bc8c7fcd7ac9ef82d2beb46c2ad2, ASSIGN because future has completed 2024-11-24T16:53:15,707 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 78b4bc8c7fcd7ac9ef82d2beb46c2ad2, server=e335232f7491,37349,1732467194570}] 2024-11-24T16:53:15,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:15,864 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 
2024-11-24T16:53:15,864 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 78b4bc8c7fcd7ac9ef82d2beb46c2ad2, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2.', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:53:15,865 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 2024-11-24T16:53:15,865 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:53:15,865 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 2024-11-24T16:53:15,865 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 2024-11-24T16:53:15,866 INFO [StoreOpener-78b4bc8c7fcd7ac9ef82d2beb46c2ad2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 2024-11-24T16:53:15,867 INFO [StoreOpener-78b4bc8c7fcd7ac9ef82d2beb46c2ad2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 columnFamilyName info 2024-11-24T16:53:15,867 DEBUG [StoreOpener-78b4bc8c7fcd7ac9ef82d2beb46c2ad2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:53:15,868 INFO [StoreOpener-78b4bc8c7fcd7ac9ef82d2beb46c2ad2-1 {}] regionserver.HStore(327): Store=78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:53:15,868 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 2024-11-24T16:53:15,869 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2 2024-11-24T16:53:15,869 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2 2024-11-24T16:53:15,869 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 2024-11-24T16:53:15,869 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 2024-11-24T16:53:15,871 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 2024-11-24T16:53:15,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:15,873 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:53:15,873 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 78b4bc8c7fcd7ac9ef82d2beb46c2ad2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=775073, jitterRate=-0.014444947242736816}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T16:53:15,874 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 2024-11-24T16:53:15,874 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2: Running coprocessor pre-open hook at 1732467195865Writing region info on filesystem at 1732467195865Initializing all the Stores at 1732467195866 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467195866Cleaning up temporary data from old regions at 1732467195869 (+3 ms)Running coprocessor post-open hooks at 1732467195874 (+5 ms)Region opened successfully at 1732467195874 2024-11-24T16:53:15,875 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2., pid=6, masterSystemTime=1732467195860 2024-11-24T16:53:15,877 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 2024-11-24T16:53:15,877 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 
2024-11-24T16:53:15,878 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=78b4bc8c7fcd7ac9ef82d2beb46c2ad2, regionState=OPEN, openSeqNum=2, regionLocation=e335232f7491,37349,1732467194570 2024-11-24T16:53:15,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 78b4bc8c7fcd7ac9ef82d2beb46c2ad2, server=e335232f7491,37349,1732467194570 because future has completed 2024-11-24T16:53:15,884 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T16:53:15,884 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 78b4bc8c7fcd7ac9ef82d2beb46c2ad2, server=e335232f7491,37349,1732467194570 in 175 msec 2024-11-24T16:53:15,887 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T16:53:15,887 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=78b4bc8c7fcd7ac9ef82d2beb46c2ad2, ASSIGN in 334 msec 2024-11-24T16:53:15,888 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T16:53:15,888 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732467195888"}]},"ts":"1732467195888"} 2024-11-24T16:53:15,889 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-24T16:53:15,890 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T16:53:15,892 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 381 msec 2024-11-24T16:53:16,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:16,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:17,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:17,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:18,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:18,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:19,803 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T16:53:19,804 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:19,804 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:19,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:19,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:19,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:19,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:19,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:19,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:19,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:19,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:19,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:19,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:19,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:19,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:19,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:19,825 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:53:19,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:19,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:20,826 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T16:53:20,826 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-24T16:53:20,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:20,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-24T16:53:24,301 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T16:53:24,301 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T16:53:24,302 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T16:53:24,302 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T16:53:24,302 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T16:53:24,302 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-24T16:53:24,303 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T16:53:24,303 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-24T16:53:24,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:24,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:25,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T16:53:25,603 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T16:53:25,603 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-24T16:53:25,606 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T16:53:25,606 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 2024-11-24T16:53:25,610 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2., hostname=e335232f7491,37349,1732467194570, seqNum=2] 2024-11-24T16:53:25,619 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T16:53:25,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T16:53:25,625 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T16:53:25,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-24T16:53:25,627 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-24T16:53:25,628 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T16:53:25,790 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37349 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-24T16:53:25,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 
2024-11-24T16:53:25,791 INFO [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T16:53:25,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/.tmp/info/bacb127da256475c95fc605beb50760f is 1080, key is row0001/info:/1732467205611/Put/seqid=0 2024-11-24T16:53:25,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741837_1013 (size=6033) 2024-11-24T16:53:25,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741837_1013 (size=6033) 2024-11-24T16:53:25,814 INFO [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/.tmp/info/bacb127da256475c95fc605beb50760f 2024-11-24T16:53:25,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/.tmp/info/bacb127da256475c95fc605beb50760f as hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/bacb127da256475c95fc605beb50760f 2024-11-24T16:53:25,826 INFO [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/bacb127da256475c95fc605beb50760f, entries=1, sequenceid=5, filesize=5.9 K 2024-11-24T16:53:25,827 INFO [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 in 36ms, sequenceid=5, compaction requested=false 2024-11-24T16:53:25,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2: 2024-11-24T16:53:25,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 
2024-11-24T16:53:25,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-24T16:53:25,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-24T16:53:25,836 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-24T16:53:25,836 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 206 msec 2024-11-24T16:53:25,839 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 217 msec 2024-11-24T16:53:25,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:34,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:34,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:35,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-24T16:53:35,652 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T16:53:35,655 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T16:53:35,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T16:53:35,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-24T16:53:35,657 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T16:53:35,658 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-24T16:53:35,658 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T16:53:35,811 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37349 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-24T16:53:35,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 
2024-11-24T16:53:35,812 INFO [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T16:53:35,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/.tmp/info/df6b3e8e38ca4ca38e1fe28d60333962 is 1080, key is row0002/info:/1732467215653/Put/seqid=0 2024-11-24T16:53:35,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741838_1014 (size=6033) 2024-11-24T16:53:35,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741838_1014 (size=6033) 2024-11-24T16:53:35,822 INFO [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/.tmp/info/df6b3e8e38ca4ca38e1fe28d60333962 2024-11-24T16:53:35,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/.tmp/info/df6b3e8e38ca4ca38e1fe28d60333962 as hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/df6b3e8e38ca4ca38e1fe28d60333962 2024-11-24T16:53:35,833 INFO [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/df6b3e8e38ca4ca38e1fe28d60333962, entries=1, sequenceid=9, filesize=5.9 K 2024-11-24T16:53:35,834 INFO [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 in 22ms, sequenceid=9, compaction requested=false 2024-11-24T16:53:35,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2: 2024-11-24T16:53:35,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 
2024-11-24T16:53:35,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-24T16:53:35,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-24T16:53:35,839 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-24T16:53:35,839 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec 2024-11-24T16:53:35,841 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec 2024-11-24T16:53:35,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:35,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:36,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:36,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:37,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:37,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:38,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:38,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:39,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:39,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 after 68046ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T16:53:39,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:39,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta after 68033ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T16:53:40,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:40,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:41,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:41,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:42,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:42,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:43,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:43,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:44,499 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T16:53:44,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:44,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:45,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-24T16:53:45,743 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T16:53:45,746 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C37349%2C1732467194570.1732467225746 2024-11-24T16:53:45,753 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:45,753 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:45,753 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:45,753 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:45,753 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:45,754 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/WALs/e335232f7491,37349,1732467194570/e335232f7491%2C37349%2C1732467194570.1732467194963 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/WALs/e335232f7491,37349,1732467194570/e335232f7491%2C37349%2C1732467194570.1732467225746 2024-11-24T16:53:45,755 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40923:40923),(127.0.0.1/127.0.0.1:46273:46273)] 2024-11-24T16:53:45,755 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/WALs/e335232f7491,37349,1732467194570/e335232f7491%2C37349%2C1732467194570.1732467194963 is not closed yet, will try archiving it next time 2024-11-24T16:53:45,756 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T16:53:45,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741833_1009 (size=5546) 2024-11-24T16:53:45,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741833_1009 (size=5546) 2024-11-24T16:53:45,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T16:53:45,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-24T16:53:45,759 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T16:53:45,760 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-24T16:53:45,760 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T16:53:45,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:45,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:45,914 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37349 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-24T16:53:45,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 
2024-11-24T16:53:45,915 INFO [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T16:53:45,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/.tmp/info/969d99b76f7146bcaafc0bff41b82292 is 1080, key is row0003/info:/1732467225744/Put/seqid=0 2024-11-24T16:53:45,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741840_1016 (size=6033) 2024-11-24T16:53:45,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741840_1016 (size=6033) 2024-11-24T16:53:45,926 INFO [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/.tmp/info/969d99b76f7146bcaafc0bff41b82292 2024-11-24T16:53:45,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/.tmp/info/969d99b76f7146bcaafc0bff41b82292 as hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/969d99b76f7146bcaafc0bff41b82292 2024-11-24T16:53:45,951 INFO [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/969d99b76f7146bcaafc0bff41b82292, entries=1, sequenceid=13, filesize=5.9 K 2024-11-24T16:53:45,952 INFO [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 in 37ms, sequenceid=13, compaction requested=true 2024-11-24T16:53:45,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2: 2024-11-24T16:53:45,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 
2024-11-24T16:53:45,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-24T16:53:45,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-24T16:53:45,959 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-24T16:53:45,959 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 195 msec 2024-11-24T16:53:45,962 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 204 msec 2024-11-24T16:53:46,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:46,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:47,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:47,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:48,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:48,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:49,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:49,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:50,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:50,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:51,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:51,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:52,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:52,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:53,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:53,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:54,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:54,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:55,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-24T16:53:55,852 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T16:53:55,852 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T16:53:55,854 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T16:53:55,854 DEBUG [Time-limited test {}] regionserver.HStore(1541): 78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info is initiating minor compaction (all files) 2024-11-24T16:53:55,854 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T16:53:55,854 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:53:55,854 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 
2024-11-24T16:53:55,854 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/bacb127da256475c95fc605beb50760f, hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/df6b3e8e38ca4ca38e1fe28d60333962, hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/969d99b76f7146bcaafc0bff41b82292] into tmpdir=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/.tmp, totalSize=17.7 K 2024-11-24T16:53:55,855 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting bacb127da256475c95fc605beb50760f, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732467205611 2024-11-24T16:53:55,855 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting df6b3e8e38ca4ca38e1fe28d60333962, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732467215653 2024-11-24T16:53:55,855 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 969d99b76f7146bcaafc0bff41b82292, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732467225744 2024-11-24T16:53:55,870 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 78b4bc8c7fcd7ac9ef82d2beb46c2ad2#info#compaction#45 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T16:53:55,870 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/.tmp/info/6cdb67f2e1394a1caadb725189558ee7 is 1080, key is row0001/info:/1732467205611/Put/seqid=0 2024-11-24T16:53:55,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741841_1017 (size=8296) 2024-11-24T16:53:55,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741841_1017 (size=8296) 2024-11-24T16:53:55,881 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/.tmp/info/6cdb67f2e1394a1caadb725189558ee7 as hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/6cdb67f2e1394a1caadb725189558ee7 2024-11-24T16:53:55,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:55,888 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info of 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 into 6cdb67f2e1394a1caadb725189558ee7(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T16:53:55,888 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2: 2024-11-24T16:53:55,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:53:55,891 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C37349%2C1732467194570.1732467235891 2024-11-24T16:53:55,902 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:55,902 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:55,902 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:55,902 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:55,902 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:53:55,902 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/WALs/e335232f7491,37349,1732467194570/e335232f7491%2C37349%2C1732467194570.1732467225746 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/WALs/e335232f7491,37349,1732467194570/e335232f7491%2C37349%2C1732467194570.1732467235891 2024-11-24T16:53:55,903 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40923:40923),(127.0.0.1/127.0.0.1:46273:46273)] 2024-11-24T16:53:55,903 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/WALs/e335232f7491,37349,1732467194570/e335232f7491%2C37349%2C1732467194570.1732467225746 is not closed yet, will try archiving it next time 2024-11-24T16:53:55,903 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/WALs/e335232f7491,37349,1732467194570/e335232f7491%2C37349%2C1732467194570.1732467194963 to hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/oldWALs/e335232f7491%2C37349%2C1732467194570.1732467194963 2024-11-24T16:53:55,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741839_1015 (size=2520) 2024-11-24T16:53:55,904 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T16:53:55,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741839_1015 (size=2520) 2024-11-24T16:53:55,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T16:53:55,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-24T16:53:55,907 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T16:53:55,908 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-11-24T16:53:55,908 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T16:53:55,937 INFO [master/e335232f7491:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T16:53:55,937 INFO [master/e335232f7491:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T16:53:56,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37349 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-24T16:53:56,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 2024-11-24T16:53:56,061 INFO [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T16:53:56,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/.tmp/info/925d417f4a1d4818a551cb7dc83be709 is 1080, key is row0000/info:/1732467235889/Put/seqid=0 2024-11-24T16:53:56,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741843_1019 (size=6033) 2024-11-24T16:53:56,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741843_1019 (size=6033) 2024-11-24T16:53:56,078 INFO [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/.tmp/info/925d417f4a1d4818a551cb7dc83be709 2024-11-24T16:53:56,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/.tmp/info/925d417f4a1d4818a551cb7dc83be709 as hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/925d417f4a1d4818a551cb7dc83be709 2024-11-24T16:53:56,091 INFO [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/925d417f4a1d4818a551cb7dc83be709, entries=1, sequenceid=18, filesize=5.9 K 2024-11-24T16:53:56,092 INFO [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 in 31ms, sequenceid=18, compaction requested=false 2024-11-24T16:53:56,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2: 2024-11-24T16:53:56,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 2024-11-24T16:53:56,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-24T16:53:56,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-24T16:53:56,097 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-24T16:53:56,097 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec 2024-11-24T16:53:56,100 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 194 msec 2024-11-24T16:53:56,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:56,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:57,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:57,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:58,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:58,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:59,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:53:59,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:00,865 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 78b4bc8c7fcd7ac9ef82d2beb46c2ad2, had cached 0 bytes from a total of 14329 2024-11-24T16:54:00,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:00,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:01,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:01,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:02,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:02,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:03,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:03,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:04,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:04,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:05,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:05,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:54:05,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44489 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-24T16:54:05,962 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T16:54:05,965 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C37349%2C1732467194570.1732467245965 2024-11-24T16:54:05,974 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:05,974 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:05,974 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:05,974 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:05,974 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:05,974 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/WALs/e335232f7491,37349,1732467194570/e335232f7491%2C37349%2C1732467194570.1732467235891 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/WALs/e335232f7491,37349,1732467194570/e335232f7491%2C37349%2C1732467194570.1732467245965 2024-11-24T16:54:05,975 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40923:40923),(127.0.0.1/127.0.0.1:46273:46273)] 2024-11-24T16:54:05,975 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/WALs/e335232f7491,37349,1732467194570/e335232f7491%2C37349%2C1732467194570.1732467235891 is not closed yet, will try archiving it next time 2024-11-24T16:54:05,976 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/WALs/e335232f7491,37349,1732467194570/e335232f7491%2C37349%2C1732467194570.1732467225746 to hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/oldWALs/e335232f7491%2C37349%2C1732467194570.1732467225746 2024-11-24T16:54:05,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T16:54:05,976 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
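Editor's sketch (not part of the captured log): the repeated WARN entries above come from RecoverLeaseFSUtils probing the WAL file through reflection; once the DFSClient has been shut down, DistributedFileSystem.isFileClosed throws IOException("Filesystem closed"), and reflection surfaces it as an InvocationTargetException whose own message is null, which is exactly the "InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" shape seen here. A minimal, self-contained illustration of that wrapping, using only the JDK (the Probe class is a hypothetical stand-in for the real filesystem call, not HBase code):

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveProbeDemo {
  // Hypothetical stand-in for DistributedFileSystem#isFileClosed(Path).
  static class Probe {
    boolean clientClosed = true; // pretend the DFS client was already shut down
    public boolean isFileClosed(String path) throws IOException {
      if (clientClosed) {
        throw new IOException("Filesystem closed"); // same cause as in the log
      }
      return true;
    }
  }

  public static void main(String[] args) throws Exception {
    Probe fs = new Probe();
    Method m = Probe.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(fs, "/some/wal/file");
    } catch (InvocationTargetException e) {
      // The wrapper's message is null; the real failure is the wrapped cause,
      // matching the "InvocationTargetException: null ... Caused by" pattern above.
      System.out.println("wrapper message: " + e.getMessage());
      System.out.println("cause: " + e.getCause());
    }
  }
}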
2024-11-24T16:54:05,976 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:54:05,976 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:54:05,976 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:54:05,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741842_1018 (size=2026) 2024-11-24T16:54:05,976 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): 
Shutting down HBase Cluster 2024-11-24T16:54:05,976 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T16:54:05,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741842_1018 (size=2026) 2024-11-24T16:54:05,978 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=597330653, stopped=false 2024-11-24T16:54:05,978 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e335232f7491,44489,1732467194516 2024-11-24T16:54:05,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T16:54:05,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T16:54:05,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:54:05,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:54:05,980 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T16:54:05,981 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:54:05,981 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:54:05,981 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
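Editor's sketch (not part of the captured log): the ZKWatcher entries above show shutdown being propagated through ZooKeeper: deleting /hbase/running fires a NodeDeleted event on the master and region server watchers, and each then re-sets a watch on the now-absent znode ("Set watcher on znode that does not yet exist"). A rough sketch of that watch-and-rearm pattern with the plain ZooKeeper client, assuming an org.apache.zookeeper dependency and a made-up quorum address; this is not HBase's ZKWatcher implementation:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatch {
  public static void main(String[] args) throws Exception {
    final String path = "/hbase/running";
    // Assumed quorum address; in the log above it is 127.0.0.1:50115.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });

    Watcher watcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        if (Watcher.Event.EventType.NodeDeleted == event.getType()
            && path.equals(event.getPath())) {
          System.out.println("cluster shutdown requested: " + path + " deleted");
        }
        try {
          // Re-arm the watch even though the znode no longer exists,
          // mirroring "Set watcher on znode that does not yet exist".
          zk.exists(path, this);
        } catch (Exception e) {
          e.printStackTrace();
        }
      }
    };
    zk.exists(path, watcher); // initial watch
    Thread.sleep(60_000);     // keep the demo process alive long enough to observe events
    zk.close();
  }
}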
2024-11-24T16:54:05,981 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:54:05,981 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:54:05,981 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'e335232f7491,37349,1732467194570' ***** 2024-11-24T16:54:05,981 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T16:54:05,981 INFO [RS:0;e335232f7491:37349 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T16:54:05,982 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T16:54:05,982 INFO [RS:0;e335232f7491:37349 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T16:54:05,982 INFO [RS:0;e335232f7491:37349 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T16:54:05,982 INFO [RS:0;e335232f7491:37349 {}] regionserver.HRegionServer(3091): Received CLOSE for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 2024-11-24T16:54:05,984 INFO [RS:0;e335232f7491:37349 {}] regionserver.HRegionServer(959): stopping server e335232f7491,37349,1732467194570 2024-11-24T16:54:05,984 INFO [RS:0;e335232f7491:37349 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T16:54:05,984 INFO [RS:0;e335232f7491:37349 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e335232f7491:37349. 2024-11-24T16:54:05,984 DEBUG [RS:0;e335232f7491:37349 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:54:05,984 DEBUG [RS:0;e335232f7491:37349 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:54:05,984 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 78b4bc8c7fcd7ac9ef82d2beb46c2ad2, disabling compactions & flushes 2024-11-24T16:54:05,984 INFO [RS:0;e335232f7491:37349 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T16:54:05,984 INFO [RS:0;e335232f7491:37349 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T16:54:05,984 INFO [RS:0;e335232f7491:37349 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
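Editor's sketch (not part of the captured log): the "Call stack:" DEBUG entries above are produced by capturing the current thread's stack at close time, a useful way to record which code path closed a shared connection. A tiny JDK-only illustration of the same idea (the CloseTracingConnection name is invented for the example):

// Logs the caller's stack whenever close() runs, so shutdown paths like the
// DEBUG "Call stack:" entries above become visible.
public class CloseTracingConnection implements AutoCloseable {
  @Override
  public void close() {
    StringBuilder sb = new StringBuilder("Connection closed. Call stack:");
    for (StackTraceElement frame : Thread.currentThread().getStackTrace()) {
      sb.append(System.lineSeparator()).append("  at ").append(frame);
    }
    System.out.println(sb);
  }

  public static void main(String[] args) {
    try (CloseTracingConnection conn = new CloseTracingConnection()) {
      // work with conn ...
    }
  }
}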
2024-11-24T16:54:05,984 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 2024-11-24T16:54:05,985 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 2024-11-24T16:54:05,985 INFO [RS:0;e335232f7491:37349 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T16:54:05,985 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. after waiting 0 ms 2024-11-24T16:54:05,985 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 2024-11-24T16:54:05,985 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T16:54:05,985 INFO [RS:0;e335232f7491:37349 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T16:54:05,985 DEBUG [RS:0;e335232f7491:37349 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 78b4bc8c7fcd7ac9ef82d2beb46c2ad2=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2.} 2024-11-24T16:54:05,985 DEBUG [RS:0;e335232f7491:37349 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 2024-11-24T16:54:05,985 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T16:54:05,985 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T16:54:05,985 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T16:54:05,985 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T16:54:05,985 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T16:54:05,985 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-24T16:54:05,991 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/.tmp/info/b403f42ac59c4c2ea697146c9b8245fb is 1080, key is 
row0001/info:/1732467245963/Put/seqid=0 2024-11-24T16:54:05,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741845_1021 (size=6033) 2024-11-24T16:54:05,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741845_1021 (size=6033) 2024-11-24T16:54:05,999 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/.tmp/info/b403f42ac59c4c2ea697146c9b8245fb 2024-11-24T16:54:06,005 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/.tmp/info/b403f42ac59c4c2ea697146c9b8245fb as hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/b403f42ac59c4c2ea697146c9b8245fb 2024-11-24T16:54:06,008 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740/.tmp/info/ca78fc1560784e86ae14e5a744bf6b6b is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2./info:regioninfo/1732467195878/Put/seqid=0 2024-11-24T16:54:06,012 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/b403f42ac59c4c2ea697146c9b8245fb, entries=1, sequenceid=22, filesize=5.9 K 2024-11-24T16:54:06,013 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 in 28ms, sequenceid=22, compaction requested=true 2024-11-24T16:54:06,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741846_1022 (size=7308) 2024-11-24T16:54:06,014 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/bacb127da256475c95fc605beb50760f, hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/df6b3e8e38ca4ca38e1fe28d60333962, 
hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/969d99b76f7146bcaafc0bff41b82292] to archive 2024-11-24T16:54:06,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741846_1022 (size=7308) 2024-11-24T16:54:06,015 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740/.tmp/info/ca78fc1560784e86ae14e5a744bf6b6b 2024-11-24T16:54:06,015 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-24T16:54:06,017 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/bacb127da256475c95fc605beb50760f to hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/bacb127da256475c95fc605beb50760f 2024-11-24T16:54:06,019 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/df6b3e8e38ca4ca38e1fe28d60333962 to hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/df6b3e8e38ca4ca38e1fe28d60333962 2024-11-24T16:54:06,021 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/969d99b76f7146bcaafc0bff41b82292 to hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info/969d99b76f7146bcaafc0bff41b82292 2024-11-24T16:54:06,021 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e335232f7491:44489 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] 
at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-24T16:54:06,022 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [bacb127da256475c95fc605beb50760f=6033, df6b3e8e38ca4ca38e1fe28d60333962=6033, 969d99b76f7146bcaafc0bff41b82292=6033] 2024-11-24T16:54:06,026 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/78b4bc8c7fcd7ac9ef82d2beb46c2ad2/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-24T16:54:06,026 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 2024-11-24T16:54:06,026 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2: Waiting for close lock at 1732467245984Running coprocessor pre-close hooks at 1732467245984Disabling compacts and flushes for region at 1732467245984Disabling writes for close at 1732467245985 (+1 ms)Obtaining lock to block concurrent updates at 1732467245985Preparing flush snapshotting stores in 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 at 1732467245985Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732467245985Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. at 1732467245986 (+1 ms)Flushing 78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info: creating writer at 1732467245986Flushing 78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info: appending metadata at 1732467245990 (+4 ms)Flushing 78b4bc8c7fcd7ac9ef82d2beb46c2ad2/info: closing flushed file at 1732467245990Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@494ce1e2: reopening flushed file at 1732467246005 (+15 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 78b4bc8c7fcd7ac9ef82d2beb46c2ad2 in 28ms, sequenceid=22, compaction requested=true at 1732467246013 (+8 ms)Writing region close event to WAL at 1732467246022 (+9 ms)Running coprocessor post-close hooks at 1732467246026 (+4 ms)Closed at 1732467246026 2024-11-24T16:54:06,027 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732467195508.78b4bc8c7fcd7ac9ef82d2beb46c2ad2. 
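Editor's sketch (not part of the captured log): the flush sequence above follows a write-to-temporary-then-commit pattern: the memstore snapshot is written under .tmp/info/, then "Committing ... .tmp/... as ... info/..." moves the file into the visible store directory before the superseded, compacted-away files are archived. A generic sketch of that commit step on a local filesystem (paths are invented; HBase does this through HRegionFileSystem on HDFS, not java.nio, and the move assumes source and destination share one filesystem):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class TmpCommitDemo {
  public static void main(String[] args) throws IOException {
    Path storeDir = Files.createTempDirectory("info");
    Path tmpDir = Files.createDirectories(storeDir.resolve(".tmp"));

    // 1. Write the new file under .tmp so readers never see a partial file.
    Path tmpFile = Files.write(tmpDir.resolve("flushed-hfile"),
        "flushed cells".getBytes());

    // 2. Commit: move it into the visible store directory in one step.
    Path committed = Files.move(tmpFile,
        storeDir.resolve(tmpFile.getFileName()),
        StandardCopyOption.ATOMIC_MOVE);
    System.out.println("committed " + committed);
  }
}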
2024-11-24T16:54:06,038 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740/.tmp/ns/8d1e271ebfec43969ab356c6d74b2f5a is 43, key is default/ns:d/1732467195396/Put/seqid=0 2024-11-24T16:54:06,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741847_1023 (size=5153) 2024-11-24T16:54:06,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741847_1023 (size=5153) 2024-11-24T16:54:06,044 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740/.tmp/ns/8d1e271ebfec43969ab356c6d74b2f5a 2024-11-24T16:54:06,071 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740/.tmp/table/9c66db56b10243b59cb295e85f9e2538 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732467195888/Put/seqid=0 2024-11-24T16:54:06,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741848_1024 (size=5508) 2024-11-24T16:54:06,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741848_1024 (size=5508) 2024-11-24T16:54:06,077 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740/.tmp/table/9c66db56b10243b59cb295e85f9e2538 2024-11-24T16:54:06,083 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740/.tmp/info/ca78fc1560784e86ae14e5a744bf6b6b as hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740/info/ca78fc1560784e86ae14e5a744bf6b6b 2024-11-24T16:54:06,088 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740/info/ca78fc1560784e86ae14e5a744bf6b6b, entries=10, sequenceid=11, filesize=7.1 K 2024-11-24T16:54:06,089 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740/.tmp/ns/8d1e271ebfec43969ab356c6d74b2f5a as hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740/ns/8d1e271ebfec43969ab356c6d74b2f5a 2024-11-24T16:54:06,095 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740/ns/8d1e271ebfec43969ab356c6d74b2f5a, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T16:54:06,096 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740/.tmp/table/9c66db56b10243b59cb295e85f9e2538 as hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740/table/9c66db56b10243b59cb295e85f9e2538 2024-11-24T16:54:06,100 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740/table/9c66db56b10243b59cb295e85f9e2538, entries=2, sequenceid=11, filesize=5.4 K 2024-11-24T16:54:06,101 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 116ms, sequenceid=11, compaction requested=false 2024-11-24T16:54:06,106 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T16:54:06,106 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T16:54:06,106 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T16:54:06,107 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732467245985Running coprocessor pre-close hooks at 1732467245985Disabling compacts and flushes for region at 1732467245985Disabling writes for close at 1732467245985Obtaining lock to block concurrent updates at 1732467245985Preparing flush snapshotting stores in 1588230740 at 1732467245985Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732467245986 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732467245986Flushing 1588230740/info: creating writer at 1732467245986Flushing 1588230740/info: appending metadata at 1732467246008 (+22 ms)Flushing 1588230740/info: closing flushed file at 1732467246008Flushing 1588230740/ns: creating writer at 1732467246020 (+12 ms)Flushing 1588230740/ns: appending metadata at 1732467246037 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732467246037Flushing 1588230740/table: creating writer at 1732467246050 (+13 ms)Flushing 1588230740/table: appending metadata at 1732467246070 (+20 ms)Flushing 1588230740/table: closing flushed file at 1732467246070Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@22c773e5: reopening flushed file at 1732467246082 (+12 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5021e6d4: reopening flushed file at 1732467246089 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49318a9: reopening flushed file at 1732467246095 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 116ms, sequenceid=11, compaction requested=false at 1732467246101 (+6 ms)Writing region close event to WAL at 1732467246102 (+1 ms)Running coprocessor post-close hooks at 1732467246106 (+4 ms)Closed at 1732467246106 2024-11-24T16:54:06,107 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T16:54:06,185 INFO [RS:0;e335232f7491:37349 {}] regionserver.HRegionServer(976): stopping server e335232f7491,37349,1732467194570; all regions closed. 2024-11-24T16:54:06,186 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:06,186 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:06,186 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:06,186 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:06,186 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:06,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741834_1010 (size=3306) 2024-11-24T16:54:06,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741834_1010 (size=3306) 2024-11-24T16:54:06,191 DEBUG [RS:0;e335232f7491:37349 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/oldWALs 2024-11-24T16:54:06,191 INFO [RS:0;e335232f7491:37349 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e335232f7491%2C37349%2C1732467194570.meta:.meta(num 1732467195344) 2024-11-24T16:54:06,192 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:06,192 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:06,192 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:06,192 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:06,192 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:06,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741844_1020 (size=1252) 2024-11-24T16:54:06,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741844_1020 (size=1252) 2024-11-24T16:54:06,197 DEBUG [RS:0;e335232f7491:37349 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/oldWALs 2024-11-24T16:54:06,197 INFO [RS:0;e335232f7491:37349 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e335232f7491%2C37349%2C1732467194570:(num 1732467245965) 2024-11-24T16:54:06,197 DEBUG [RS:0;e335232f7491:37349 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:54:06,197 INFO [RS:0;e335232f7491:37349 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T16:54:06,197 INFO [RS:0;e335232f7491:37349 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T16:54:06,197 INFO [RS:0;e335232f7491:37349 {}] hbase.ChoreService(370): Chore service for: regionserver/e335232f7491:0 had 
[ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-24T16:54:06,198 INFO [RS:0;e335232f7491:37349 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T16:54:06,198 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T16:54:06,198 INFO [RS:0;e335232f7491:37349 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37349 2024-11-24T16:54:06,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e335232f7491,37349,1732467194570 2024-11-24T16:54:06,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T16:54:06,200 INFO [RS:0;e335232f7491:37349 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T16:54:06,210 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e335232f7491,37349,1732467194570] 2024-11-24T16:54:06,212 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e335232f7491,37349,1732467194570 already deleted, retry=false 2024-11-24T16:54:06,212 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e335232f7491,37349,1732467194570 expired; onlineServers=0 2024-11-24T16:54:06,212 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e335232f7491,44489,1732467194516' ***** 2024-11-24T16:54:06,212 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T16:54:06,212 INFO [M:0;e335232f7491:44489 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T16:54:06,212 INFO [M:0;e335232f7491:44489 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T16:54:06,212 DEBUG [M:0;e335232f7491:44489 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T16:54:06,212 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
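Editor's sketch (not part of the captured log): the ChoreService entries above list periodic "ScheduledChore" tasks (for example CompactionThroughputTuner, period=60000, unit=MILLISECONDS) that are cancelled on shutdown. The plain-JDK equivalent of that pattern is shown only to make the period/unit fields concrete; the chore name is taken from the log, but the implementation is not HBase's ChoreService:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreDemo {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();

    // period=60000, unit=MILLISECONDS, as in the ScheduledChore listing above
    chores.scheduleAtFixedRate(
        () -> System.out.println("CompactionThroughputTuner tick"),
        0, 60_000, TimeUnit.MILLISECONDS);

    Thread.sleep(5_000);
    // On shutdown the outstanding chores are cancelled, matching the
    // "Chore service for: ... on shutdown" lines above.
    chores.shutdownNow();
  }
}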
2024-11-24T16:54:06,212 DEBUG [M:0;e335232f7491:44489 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T16:54:06,212 DEBUG [master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467194735 {}] cleaner.HFileCleaner(306): Exit Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467194735,5,FailOnTimeoutGroup] 2024-11-24T16:54:06,212 DEBUG [master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467194735 {}] cleaner.HFileCleaner(306): Exit Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467194735,5,FailOnTimeoutGroup] 2024-11-24T16:54:06,212 INFO [M:0;e335232f7491:44489 {}] hbase.ChoreService(370): Chore service for: master/e335232f7491:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T16:54:06,212 INFO [M:0;e335232f7491:44489 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T16:54:06,212 DEBUG [M:0;e335232f7491:44489 {}] master.HMaster(1795): Stopping service threads 2024-11-24T16:54:06,212 INFO [M:0;e335232f7491:44489 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T16:54:06,213 INFO [M:0;e335232f7491:44489 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T16:54:06,213 INFO [M:0;e335232f7491:44489 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T16:54:06,213 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T16:54:06,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T16:54:06,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:54:06,214 DEBUG [M:0;e335232f7491:44489 {}] zookeeper.ZKUtil(347): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T16:54:06,214 WARN [M:0;e335232f7491:44489 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T16:54:06,215 INFO [M:0;e335232f7491:44489 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/.lastflushedseqids 2024-11-24T16:54:06,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741849_1025 (size=130) 2024-11-24T16:54:06,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741849_1025 (size=130) 2024-11-24T16:54:06,228 INFO [M:0;e335232f7491:44489 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T16:54:06,228 INFO [M:0;e335232f7491:44489 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T16:54:06,229 DEBUG [M:0;e335232f7491:44489 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T16:54:06,229 INFO [M:0;e335232f7491:44489 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:54:06,229 DEBUG [M:0;e335232f7491:44489 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:54:06,229 DEBUG [M:0;e335232f7491:44489 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T16:54:06,229 DEBUG [M:0;e335232f7491:44489 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:54:06,229 INFO [M:0;e335232f7491:44489 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.60 KB heapSize=55.01 KB 2024-11-24T16:54:06,247 DEBUG [M:0;e335232f7491:44489 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c61a5d43ec3f419a95839e290b89f5e3 is 82, key is hbase:meta,,1/info:regioninfo/1732467195380/Put/seqid=0 2024-11-24T16:54:06,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741850_1026 (size=5672) 2024-11-24T16:54:06,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741850_1026 (size=5672) 2024-11-24T16:54:06,253 INFO [M:0;e335232f7491:44489 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c61a5d43ec3f419a95839e290b89f5e3 2024-11-24T16:54:06,276 DEBUG [M:0;e335232f7491:44489 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b7de1d20ddb849538053bac7ec095795 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732467195891/Put/seqid=0 2024-11-24T16:54:06,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741851_1027 (size=7824) 2024-11-24T16:54:06,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741851_1027 (size=7824) 2024-11-24T16:54:06,281 INFO [M:0;e335232f7491:44489 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.00 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b7de1d20ddb849538053bac7ec095795 2024-11-24T16:54:06,286 INFO [M:0;e335232f7491:44489 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b7de1d20ddb849538053bac7ec095795 2024-11-24T16:54:06,301 DEBUG [M:0;e335232f7491:44489 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/87e0f4f073a34f98a2212e59262be067 is 69, key is e335232f7491,37349,1732467194570/rs:state/1732467194815/Put/seqid=0 2024-11-24T16:54:06,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:54:06,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37349-0x1014ae565150001, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:54:06,302 INFO [RS:0;e335232f7491:37349 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T16:54:06,302 INFO [RS:0;e335232f7491:37349 {}] regionserver.HRegionServer(1031): Exiting; stopping=e335232f7491,37349,1732467194570; zookeeper connection closed. 2024-11-24T16:54:06,302 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@31aaf713 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@31aaf713 2024-11-24T16:54:06,303 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T16:54:06,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741852_1028 (size=5156) 2024-11-24T16:54:06,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741852_1028 (size=5156) 2024-11-24T16:54:06,310 INFO [M:0;e335232f7491:44489 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/87e0f4f073a34f98a2212e59262be067 2024-11-24T16:54:06,331 DEBUG [M:0;e335232f7491:44489 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0e0b4d0fb84349fe8600423621069e5f is 52, key is load_balancer_on/state:d/1732467195504/Put/seqid=0 2024-11-24T16:54:06,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741853_1029 (size=5056) 2024-11-24T16:54:06,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741853_1029 (size=5056) 2024-11-24T16:54:06,337 INFO [M:0;e335232f7491:44489 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0e0b4d0fb84349fe8600423621069e5f 2024-11-24T16:54:06,343 DEBUG [M:0;e335232f7491:44489 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c61a5d43ec3f419a95839e290b89f5e3 as 
hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c61a5d43ec3f419a95839e290b89f5e3 2024-11-24T16:54:06,349 INFO [M:0;e335232f7491:44489 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c61a5d43ec3f419a95839e290b89f5e3, entries=8, sequenceid=121, filesize=5.5 K 2024-11-24T16:54:06,350 DEBUG [M:0;e335232f7491:44489 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b7de1d20ddb849538053bac7ec095795 as hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b7de1d20ddb849538053bac7ec095795 2024-11-24T16:54:06,355 INFO [M:0;e335232f7491:44489 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b7de1d20ddb849538053bac7ec095795 2024-11-24T16:54:06,355 INFO [M:0;e335232f7491:44489 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b7de1d20ddb849538053bac7ec095795, entries=14, sequenceid=121, filesize=7.6 K 2024-11-24T16:54:06,356 DEBUG [M:0;e335232f7491:44489 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/87e0f4f073a34f98a2212e59262be067 as hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/87e0f4f073a34f98a2212e59262be067 2024-11-24T16:54:06,361 INFO [M:0;e335232f7491:44489 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/87e0f4f073a34f98a2212e59262be067, entries=1, sequenceid=121, filesize=5.0 K 2024-11-24T16:54:06,362 DEBUG [M:0;e335232f7491:44489 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0e0b4d0fb84349fe8600423621069e5f as hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0e0b4d0fb84349fe8600423621069e5f 2024-11-24T16:54:06,367 INFO [M:0;e335232f7491:44489 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34699/user/jenkins/test-data/5d86a827-29ad-dfaa-5f84-80fd91061ad5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0e0b4d0fb84349fe8600423621069e5f, entries=1, sequenceid=121, filesize=4.9 K 2024-11-24T16:54:06,368 INFO [M:0;e335232f7491:44489 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 139ms, sequenceid=121, compaction requested=false 2024-11-24T16:54:06,370 INFO [M:0;e335232f7491:44489 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T16:54:06,370 DEBUG [M:0;e335232f7491:44489 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732467246229Disabling compacts and flushes for region at 1732467246229Disabling writes for close at 1732467246229Obtaining lock to block concurrent updates at 1732467246229Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732467246229Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44650, getHeapSize=56264, getOffHeapSize=0, getCellsCount=140 at 1732467246229Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732467246230 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732467246230Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732467246246 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732467246246Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732467246258 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732467246275 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732467246275Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732467246286 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732467246301 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732467246301Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732467246315 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732467246331 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732467246331Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@745477b2: reopening flushed file at 1732467246342 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@189d7515: reopening flushed file at 1732467246349 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@697f3709: reopening flushed file at 1732467246355 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@454ddae2: reopening flushed file at 1732467246361 (+6 ms)Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 139ms, sequenceid=121, compaction requested=false at 1732467246368 (+7 ms)Writing region close event to WAL at 1732467246370 (+2 ms)Closed at 1732467246370 2024-11-24T16:54:06,370 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:06,371 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:06,371 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:06,371 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:06,371 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:54:06,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34143 is added to blk_1073741830_1006 (size=53047) 2024-11-24T16:54:06,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43875 is added to blk_1073741830_1006 (size=53047) 2024-11-24T16:54:06,374 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T16:54:06,374 INFO [M:0;e335232f7491:44489 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T16:54:06,374 INFO [M:0;e335232f7491:44489 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44489 2024-11-24T16:54:06,374 INFO [M:0;e335232f7491:44489 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T16:54:06,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:54:06,476 INFO [M:0;e335232f7491:44489 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T16:54:06,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44489-0x1014ae565150000, quorum=127.0.0.1:50115, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:54:06,479 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@72ef9fa2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:54:06,479 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@618f0457{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:54:06,479 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:54:06,480 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cac9c50{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:54:06,480 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b5fa12b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/hadoop.log.dir/,STOPPED} 2024-11-24T16:54:06,482 WARN [BP-956033605-172.17.0.2-1732467193858 heartbeating to localhost/127.0.0.1:34699 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:54:06,482 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T16:54:06,482 WARN [BP-956033605-172.17.0.2-1732467193858 heartbeating to localhost/127.0.0.1:34699 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-956033605-172.17.0.2-1732467193858 (Datanode Uuid 71e6b226-7ae8-4198-a933-cde3ac6c257d) service to localhost/127.0.0.1:34699 2024-11-24T16:54:06,482 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:54:06,482 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/cluster_9eecae05-0da2-91ae-2d19-c33f87ecf12d/data/data3/current/BP-956033605-172.17.0.2-1732467193858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:54:06,482 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/cluster_9eecae05-0da2-91ae-2d19-c33f87ecf12d/data/data4/current/BP-956033605-172.17.0.2-1732467193858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:54:06,483 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:54:06,485 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b24fbcd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:54:06,485 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21bca0b2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:54:06,485 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:54:06,485 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59a5f2c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:54:06,485 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e7fac47{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/hadoop.log.dir/,STOPPED} 2024-11-24T16:54:06,487 WARN [BP-956033605-172.17.0.2-1732467193858 heartbeating to localhost/127.0.0.1:34699 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:54:06,487 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T16:54:06,487 WARN [BP-956033605-172.17.0.2-1732467193858 heartbeating to localhost/127.0.0.1:34699 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-956033605-172.17.0.2-1732467193858 (Datanode Uuid fc2d3e14-97f7-40b5-afe0-0d1f0936fc25) service to localhost/127.0.0.1:34699 2024-11-24T16:54:06,487 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:54:06,488 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/cluster_9eecae05-0da2-91ae-2d19-c33f87ecf12d/data/data1/current/BP-956033605-172.17.0.2-1732467193858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:54:06,488 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/cluster_9eecae05-0da2-91ae-2d19-c33f87ecf12d/data/data2/current/BP-956033605-172.17.0.2-1732467193858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:54:06,488 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:54:06,496 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b4e4fbe{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T16:54:06,496 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@18baee58{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:54:06,497 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:54:06,497 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40e0483f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:54:06,497 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@701842fe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/hadoop.log.dir/,STOPPED} 2024-11-24T16:54:06,503 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T16:54:06,525 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T16:54:06,534 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 179) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34699 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/e335232f7491:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34699 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34699 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34699 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34699 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34699 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34699 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34699 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:34699 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=210 (was 156) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5702 (was 6238) 2024-11-24T16:54:06,542 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=210, ProcessCount=11, AvailableMemoryMB=5702 2024-11-24T16:54:06,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T16:54:06,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/hadoop.log.dir so I do NOT create it in target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f 2024-11-24T16:54:06,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e139c4b9-911e-514c-e1c9-5df62042032d/hadoop.tmp.dir so I do NOT create it in target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f 2024-11-24T16:54:06,543 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/cluster_708ac12a-52ff-d10f-32fa-4b9804a92436, deleteOnExit=true 2024-11-24T16:54:06,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T16:54:06,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/test.cache.data in system properties and HBase conf 2024-11-24T16:54:06,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T16:54:06,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/hadoop.log.dir in system properties and HBase conf 2024-11-24T16:54:06,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T16:54:06,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T16:54:06,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T16:54:06,544 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-24T16:54:06,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T16:54:06,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T16:54:06,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T16:54:06,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T16:54:06,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T16:54:06,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T16:54:06,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T16:54:06,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T16:54:06,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T16:54:06,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/nfs.dump.dir in system properties and HBase conf 2024-11-24T16:54:06,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/java.io.tmpdir in system properties and HBase conf 2024-11-24T16:54:06,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T16:54:06,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T16:54:06,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T16:54:06,559 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T16:54:06,629 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:54:06,632 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:54:06,633 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:54:06,633 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:54:06,633 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:54:06,634 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:54:06,634 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25570184{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:54:06,635 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27fb1a0a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:54:06,757 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@152e40b0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/java.io.tmpdir/jetty-localhost-38105-hadoop-hdfs-3_4_1-tests_jar-_-any-2386953362530757496/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T16:54:06,757 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@687d9ffd{HTTP/1.1, (http/1.1)}{localhost:38105} 2024-11-24T16:54:06,757 INFO [Time-limited test {}] server.Server(415): Started @237719ms 2024-11-24T16:54:06,771 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T16:54:06,832 INFO [regionserver/e335232f7491:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T16:54:06,848 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-24T16:54:06,851 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-24T16:54:06,852 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-24T16:54:06,852 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-24T16:54:06,852 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-24T16:54:06,853 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bd9a438{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/hadoop.log.dir/,AVAILABLE}
2024-11-24T16:54:06,853 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79864455{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-24T16:54:06,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-11-24T16:54:06,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-11-24T16:54:06,974 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@15c8c411{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/java.io.tmpdir/jetty-localhost-45515-hadoop-hdfs-3_4_1-tests_jar-_-any-15792122207152858519/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-24T16:54:06,974 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6d6e0cf8{HTTP/1.1, (http/1.1)}{localhost:45515}
2024-11-24T16:54:06,974 INFO [Time-limited test {}] server.Server(415): Started @237936ms
2024-11-24T16:54:06,976 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-24T16:54:07,010 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:54:07,013 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:54:07,014 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:54:07,014 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:54:07,014 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T16:54:07,015 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ff7780b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:54:07,015 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b28e221{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:54:07,106 WARN [Thread-1955 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/cluster_708ac12a-52ff-d10f-32fa-4b9804a92436/data/data1/current/BP-963785250-172.17.0.2-1732467246566/current, will proceed with Du for space computation calculation, 2024-11-24T16:54:07,106 WARN [Thread-1956 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/cluster_708ac12a-52ff-d10f-32fa-4b9804a92436/data/data2/current/BP-963785250-172.17.0.2-1732467246566/current, will proceed with Du for space computation calculation, 2024-11-24T16:54:07,128 WARN [Thread-1934 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T16:54:07,131 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x358e0abe37827b05 with lease ID 0x6afd54c22fdec095: Processing first storage report for DS-d1e2ebaf-d92f-47ce-9a4f-b6b32abfcd8b from datanode DatanodeRegistration(127.0.0.1:43271, datanodeUuid=c3a4c52f-cf37-4df8-b529-238189abe78b, infoPort=38963, infoSecurePort=0, ipcPort=39959, storageInfo=lv=-57;cid=testClusterID;nsid=1979130777;c=1732467246566) 2024-11-24T16:54:07,131 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x358e0abe37827b05 with lease ID 0x6afd54c22fdec095: from storage DS-d1e2ebaf-d92f-47ce-9a4f-b6b32abfcd8b node DatanodeRegistration(127.0.0.1:43271, datanodeUuid=c3a4c52f-cf37-4df8-b529-238189abe78b, infoPort=38963, infoSecurePort=0, ipcPort=39959, storageInfo=lv=-57;cid=testClusterID;nsid=1979130777;c=1732467246566), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:54:07,131 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x358e0abe37827b05 with lease ID 0x6afd54c22fdec095: Processing first storage report for DS-e14cdbb4-b4ec-4e10-8998-4b53b396f067 from datanode DatanodeRegistration(127.0.0.1:43271, datanodeUuid=c3a4c52f-cf37-4df8-b529-238189abe78b, infoPort=38963, infoSecurePort=0, ipcPort=39959, storageInfo=lv=-57;cid=testClusterID;nsid=1979130777;c=1732467246566) 2024-11-24T16:54:07,131 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x358e0abe37827b05 with lease ID 0x6afd54c22fdec095: from storage DS-e14cdbb4-b4ec-4e10-8998-4b53b396f067 node DatanodeRegistration(127.0.0.1:43271, datanodeUuid=c3a4c52f-cf37-4df8-b529-238189abe78b, infoPort=38963, infoSecurePort=0, ipcPort=39959, storageInfo=lv=-57;cid=testClusterID;nsid=1979130777;c=1732467246566), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:54:07,142 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@629cd82f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/java.io.tmpdir/jetty-localhost-43707-hadoop-hdfs-3_4_1-tests_jar-_-any-16828491670168043149/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:54:07,142 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f25c1cb{HTTP/1.1, (http/1.1)}{localhost:43707} 2024-11-24T16:54:07,142 INFO [Time-limited test {}] server.Server(415): Started @238104ms 2024-11-24T16:54:07,144 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T16:54:07,250 WARN [Thread-1981 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/cluster_708ac12a-52ff-d10f-32fa-4b9804a92436/data/data3/current/BP-963785250-172.17.0.2-1732467246566/current, will proceed with Du for space computation calculation, 2024-11-24T16:54:07,250 WARN [Thread-1982 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/cluster_708ac12a-52ff-d10f-32fa-4b9804a92436/data/data4/current/BP-963785250-172.17.0.2-1732467246566/current, will proceed with Du for space computation calculation, 2024-11-24T16:54:07,269 WARN [Thread-1970 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T16:54:07,271 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2976a840efa04227 with lease ID 0x6afd54c22fdec096: Processing first storage report for DS-c93792d8-a181-4b1b-83db-4dddb0cc5f60 from datanode DatanodeRegistration(127.0.0.1:40787, datanodeUuid=3deceb80-ed62-436c-8091-5ead47cbf088, infoPort=37491, infoSecurePort=0, ipcPort=46319, storageInfo=lv=-57;cid=testClusterID;nsid=1979130777;c=1732467246566) 2024-11-24T16:54:07,271 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2976a840efa04227 with lease ID 0x6afd54c22fdec096: from storage DS-c93792d8-a181-4b1b-83db-4dddb0cc5f60 node DatanodeRegistration(127.0.0.1:40787, datanodeUuid=3deceb80-ed62-436c-8091-5ead47cbf088, infoPort=37491, infoSecurePort=0, ipcPort=46319, storageInfo=lv=-57;cid=testClusterID;nsid=1979130777;c=1732467246566), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:54:07,271 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2976a840efa04227 with lease ID 0x6afd54c22fdec096: Processing first storage report for DS-5472c8b5-800c-422f-947f-9ed05bfd41ea from datanode DatanodeRegistration(127.0.0.1:40787, datanodeUuid=3deceb80-ed62-436c-8091-5ead47cbf088, infoPort=37491, infoSecurePort=0, ipcPort=46319, storageInfo=lv=-57;cid=testClusterID;nsid=1979130777;c=1732467246566) 2024-11-24T16:54:07,271 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2976a840efa04227 with lease ID 0x6afd54c22fdec096: from storage DS-5472c8b5-800c-422f-947f-9ed05bfd41ea node DatanodeRegistration(127.0.0.1:40787, datanodeUuid=3deceb80-ed62-436c-8091-5ead47cbf088, infoPort=37491, infoSecurePort=0, ipcPort=46319, storageInfo=lv=-57;cid=testClusterID;nsid=1979130777;c=1732467246566), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:54:07,367 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f 2024-11-24T16:54:07,370 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/cluster_708ac12a-52ff-d10f-32fa-4b9804a92436/zookeeper_0, clientPort=57206, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/cluster_708ac12a-52ff-d10f-32fa-4b9804a92436/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/cluster_708ac12a-52ff-d10f-32fa-4b9804a92436/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T16:54:07,371 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57206 2024-11-24T16:54:07,372 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:54:07,373 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:54:07,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741825_1001 (size=7) 2024-11-24T16:54:07,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741825_1001 (size=7) 2024-11-24T16:54:07,385 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682 with version=8 2024-11-24T16:54:07,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/hbase-staging 2024-11-24T16:54:07,388 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e335232f7491:0 server-side Connection retries=45 2024-11-24T16:54:07,388 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:54:07,388 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T16:54:07,388 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T16:54:07,388 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:54:07,388 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T16:54:07,388 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T16:54:07,388 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T16:54:07,389 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35783 2024-11-24T16:54:07,390 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35783 connecting to ZooKeeper ensemble=127.0.0.1:57206 2024-11-24T16:54:07,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:357830x0, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T16:54:07,398 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35783-0x1014ae633990000 connected 2024-11-24T16:54:07,417 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:54:07,418 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:54:07,420 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:54:07,420 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682, hbase.cluster.distributed=false 2024-11-24T16:54:07,422 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T16:54:07,422 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35783 2024-11-24T16:54:07,424 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35783 2024-11-24T16:54:07,425 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35783 2024-11-24T16:54:07,428 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35783 2024-11-24T16:54:07,429 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35783 2024-11-24T16:54:07,444 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e335232f7491:0 server-side Connection retries=45 2024-11-24T16:54:07,444 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:54:07,444 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T16:54:07,444 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T16:54:07,444 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:54:07,444 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T16:54:07,445 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T16:54:07,445 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T16:54:07,445 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42875 2024-11-24T16:54:07,447 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42875 connecting to ZooKeeper ensemble=127.0.0.1:57206 2024-11-24T16:54:07,447 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:54:07,449 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:54:07,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:428750x0, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T16:54:07,455 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:428750x0, quorum=127.0.0.1:57206, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:54:07,455 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42875-0x1014ae633990001 connected 2024-11-24T16:54:07,455 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T16:54:07,456 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T16:54:07,456 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T16:54:07,457 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T16:54:07,457 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42875 2024-11-24T16:54:07,458 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42875 2024-11-24T16:54:07,458 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42875 2024-11-24T16:54:07,458 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42875 2024-11-24T16:54:07,458 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42875 2024-11-24T16:54:07,471 
DEBUG [M:0;e335232f7491:35783 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e335232f7491:35783 2024-11-24T16:54:07,471 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e335232f7491,35783,1732467247387 2024-11-24T16:54:07,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:54:07,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:54:07,473 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e335232f7491,35783,1732467247387 2024-11-24T16:54:07,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T16:54:07,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:54:07,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:54:07,475 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T16:54:07,475 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e335232f7491,35783,1732467247387 from backup master directory 2024-11-24T16:54:07,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:54:07,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e335232f7491,35783,1732467247387 2024-11-24T16:54:07,476 WARN [master/e335232f7491:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T16:54:07,476 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e335232f7491,35783,1732467247387 2024-11-24T16:54:07,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:54:07,481 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/hbase.id] with ID: 81de65d6-a575-470d-98e0-4d4dbaaeac9c 2024-11-24T16:54:07,481 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/.tmp/hbase.id 2024-11-24T16:54:07,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741826_1002 (size=42) 2024-11-24T16:54:07,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741826_1002 (size=42) 2024-11-24T16:54:07,487 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/.tmp/hbase.id]:[hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/hbase.id] 2024-11-24T16:54:07,500 INFO [master/e335232f7491:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:54:07,500 INFO [master/e335232f7491:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T16:54:07,502 INFO [master/e335232f7491:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-24T16:54:07,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:54:07,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:54:07,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741827_1003 (size=196) 2024-11-24T16:54:07,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741827_1003 (size=196) 2024-11-24T16:54:07,512 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T16:54:07,513 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T16:54:07,513 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:54:07,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741828_1004 (size=1189) 2024-11-24T16:54:07,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741828_1004 (size=1189) 2024-11-24T16:54:07,524 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store 2024-11-24T16:54:07,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741829_1005 (size=34) 2024-11-24T16:54:07,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741829_1005 (size=34) 2024-11-24T16:54:07,531 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:54:07,531 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T16:54:07,531 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:54:07,531 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:54:07,531 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T16:54:07,531 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:54:07,531 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T16:54:07,531 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732467247531Disabling compacts and flushes for region at 1732467247531Disabling writes for close at 1732467247531Writing region close event to WAL at 1732467247531Closed at 1732467247531 2024-11-24T16:54:07,532 WARN [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/.initializing 2024-11-24T16:54:07,532 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/WALs/e335232f7491,35783,1732467247387 2024-11-24T16:54:07,535 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C35783%2C1732467247387, suffix=, logDir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/WALs/e335232f7491,35783,1732467247387, archiveDir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/oldWALs, maxLogs=10 2024-11-24T16:54:07,535 INFO [master/e335232f7491:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C35783%2C1732467247387.1732467247535 2024-11-24T16:54:07,540 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/WALs/e335232f7491,35783,1732467247387/e335232f7491%2C35783%2C1732467247387.1732467247535 2024-11-24T16:54:07,549 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38963:38963),(127.0.0.1/127.0.0.1:37491:37491)] 2024-11-24T16:54:07,550 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:54:07,550 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:54:07,551 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:54:07,551 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:54:07,552 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:54:07,554 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T16:54:07,554 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:07,554 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:54:07,554 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:54:07,556 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T16:54:07,556 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:07,556 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:54:07,556 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:54:07,557 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T16:54:07,558 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:07,558 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:54:07,558 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:54:07,559 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T16:54:07,559 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:07,560 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:54:07,560 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:54:07,561 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:54:07,561 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:54:07,563 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:54:07,563 DEBUG [master/e335232f7491:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:54:07,563 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T16:54:07,564 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:54:07,567 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:54:07,567 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856521, jitterRate=0.08912359178066254}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T16:54:07,568 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732467247551Initializing all the Stores at 1732467247552 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467247552Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467247552Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467247552Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467247552Cleaning up temporary data from old regions at 1732467247563 (+11 ms)Region opened successfully at 1732467247568 (+5 ms) 2024-11-24T16:54:07,568 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T16:54:07,572 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7555d36c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e335232f7491/172.17.0.2:0 2024-11-24T16:54:07,572 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T16:54:07,573 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T16:54:07,573 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T16:54:07,573 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T16:54:07,573 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T16:54:07,573 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T16:54:07,574 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T16:54:07,578 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T16:54:07,579 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T16:54:07,580 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T16:54:07,580 INFO [master/e335232f7491:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T16:54:07,581 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T16:54:07,582 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T16:54:07,582 INFO [master/e335232f7491:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T16:54:07,589 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T16:54:07,590 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T16:54:07,591 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T16:54:07,594 DEBUG 
[master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T16:54:07,596 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T16:54:07,597 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T16:54:07,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T16:54:07,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:54:07,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T16:54:07,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:54:07,599 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e335232f7491,35783,1732467247387, sessionid=0x1014ae633990000, setting cluster-up flag (Was=false) 2024-11-24T16:54:07,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:54:07,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:54:07,612 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T16:54:07,613 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e335232f7491,35783,1732467247387 2024-11-24T16:54:07,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:54:07,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:54:07,625 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T16:54:07,626 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e335232f7491,35783,1732467247387 2024-11-24T16:54:07,627 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T16:54:07,629 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T16:54:07,629 INFO [master/e335232f7491:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T16:54:07,629 INFO [master/e335232f7491:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T16:54:07,629 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e335232f7491,35783,1732467247387 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T16:54:07,631 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:54:07,631 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:54:07,631 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:54:07,631 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:54:07,631 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e335232f7491:0, corePoolSize=10, maxPoolSize=10 2024-11-24T16:54:07,631 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:54:07,631 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e335232f7491:0, corePoolSize=2, maxPoolSize=2 2024-11-24T16:54:07,631 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e335232f7491:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T16:54:07,633 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:54:07,633 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T16:54:07,633 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732467277633 2024-11-24T16:54:07,634 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T16:54:07,634 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T16:54:07,634 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T16:54:07,634 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T16:54:07,634 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T16:54:07,634 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T16:54:07,634 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:07,634 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T16:54:07,636 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T16:54:07,637 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T16:54:07,637 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T16:54:07,637 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T16:54:07,637 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T16:54:07,637 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T16:54:07,638 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467247637,5,FailOnTimeoutGroup] 2024-11-24T16:54:07,638 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467247638,5,FailOnTimeoutGroup] 2024-11-24T16:54:07,638 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T16:54:07,638 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T16:54:07,638 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T16:54:07,638 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
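The cleaner and recovery settings reported above can be seen in configuration form. A hedged sketch, assuming the standard HBase Configuration API: the hbase.regions.recovery.store.file.ref.count key is quoted verbatim from the log message, the cleaner-plugin keys are standard settings but should be verified against the HBase version in use, and the numeric value is illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RecoveryConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // A value > 0 enables reopening of regions with very high storeFileRefCount,
    // which the log above reports as disabled.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
    // Cleaner plugins corresponding to the HFile cleaners initialized above
    // (assumed key name; illustrative subset of the plugin list).
    conf.set("hbase.master.hfilecleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
        + "org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner");
    return conf;
  }
}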
2024-11-24T16:54:07,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741831_1007 (size=1321) 2024-11-24T16:54:07,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741831_1007 (size=1321) 2024-11-24T16:54:07,642 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T16:54:07,642 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682 2024-11-24T16:54:07,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741832_1008 (size=32) 2024-11-24T16:54:07,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741832_1008 (size=32) 2024-11-24T16:54:07,652 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:54:07,653 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T16:54:07,655 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T16:54:07,655 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:07,655 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:54:07,656 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T16:54:07,657 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T16:54:07,657 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:07,657 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:54:07,657 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T16:54:07,659 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T16:54:07,659 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:07,659 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:54:07,659 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T16:54:07,660 INFO [RS:0;e335232f7491:42875 {}] regionserver.HRegionServer(746): ClusterId : 81de65d6-a575-470d-98e0-4d4dbaaeac9c 2024-11-24T16:54:07,660 DEBUG [RS:0;e335232f7491:42875 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T16:54:07,660 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T16:54:07,660 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:07,661 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:54:07,661 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T16:54:07,662 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740 2024-11-24T16:54:07,662 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740 2024-11-24T16:54:07,663 DEBUG [RS:0;e335232f7491:42875 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T16:54:07,663 DEBUG [RS:0;e335232f7491:42875 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T16:54:07,664 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T16:54:07,664 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T16:54:07,665 DEBUG 
[PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T16:54:07,665 DEBUG [RS:0;e335232f7491:42875 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T16:54:07,666 DEBUG [RS:0;e335232f7491:42875 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e78f8b3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e335232f7491/172.17.0.2:0 2024-11-24T16:54:07,666 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T16:54:07,668 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:54:07,669 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=710782, jitterRate=-0.09619434177875519}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T16:54:07,670 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732467247652Initializing all the Stores at 1732467247653 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467247653Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467247653Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467247653Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467247653Cleaning up temporary data from old regions at 1732467247664 (+11 ms)Region opened successfully at 1732467247670 (+6 ms) 2024-11-24T16:54:07,670 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T16:54:07,670 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T16:54:07,670 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T16:54:07,670 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T16:54:07,670 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T16:54:07,673 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T16:54:07,673 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732467247670Disabling compacts and flushes for region at 1732467247670Disabling writes for close at 1732467247670Writing region close event to WAL at 1732467247673 (+3 ms)Closed at 1732467247673 2024-11-24T16:54:07,674 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:54:07,674 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T16:54:07,674 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T16:54:07,676 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T16:54:07,677 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T16:54:07,684 DEBUG [RS:0;e335232f7491:42875 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e335232f7491:42875 2024-11-24T16:54:07,684 INFO [RS:0;e335232f7491:42875 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T16:54:07,684 INFO [RS:0;e335232f7491:42875 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T16:54:07,684 DEBUG [RS:0;e335232f7491:42875 {}] regionserver.HRegionServer(832): About to register with Master. 
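The open journal above shows hbase:meta using the built-in SteppingSplitPolicy and falling back on a default flush bound because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the descriptor. As a hedged illustration only, the same two choices could be pinned explicitly on an ordinary user-table descriptor; the class and property names come from the log, the table name and numeric values are made up for the sketch.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setRegionSplitPolicyClassName("org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy")
        // 32 MB per-column-family flush lower bound (illustrative value).
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(32L * 1024 * 1024))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setBloomFilterType(BloomType.ROWCOL)              // same choices the meta descriptor above uses
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8192)
            .build())
        .build();
  }
}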
2024-11-24T16:54:07,685 INFO [RS:0;e335232f7491:42875 {}] regionserver.HRegionServer(2659): reportForDuty to master=e335232f7491,35783,1732467247387 with port=42875, startcode=1732467247444 2024-11-24T16:54:07,685 DEBUG [RS:0;e335232f7491:42875 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T16:54:07,689 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55911, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T16:54:07,689 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35783 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e335232f7491,42875,1732467247444 2024-11-24T16:54:07,690 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35783 {}] master.ServerManager(517): Registering regionserver=e335232f7491,42875,1732467247444 2024-11-24T16:54:07,691 DEBUG [RS:0;e335232f7491:42875 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682 2024-11-24T16:54:07,691 DEBUG [RS:0;e335232f7491:42875 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39069 2024-11-24T16:54:07,691 DEBUG [RS:0;e335232f7491:42875 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T16:54:07,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T16:54:07,693 DEBUG [RS:0;e335232f7491:42875 {}] zookeeper.ZKUtil(111): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e335232f7491,42875,1732467247444 2024-11-24T16:54:07,693 WARN [RS:0;e335232f7491:42875 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T16:54:07,693 INFO [RS:0;e335232f7491:42875 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:54:07,693 DEBUG [RS:0;e335232f7491:42875 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/WALs/e335232f7491,42875,1732467247444 2024-11-24T16:54:07,695 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e335232f7491,42875,1732467247444] 2024-11-24T16:54:07,697 INFO [RS:0;e335232f7491:42875 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T16:54:07,699 INFO [RS:0;e335232f7491:42875 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T16:54:07,699 INFO [RS:0;e335232f7491:42875 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T16:54:07,699 INFO [RS:0;e335232f7491:42875 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
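Several region-server knobs surface in this stretch of the log: the FSHLog WAL provider, the global memstore limits behind MemStoreFlusher, and the compaction throughput bounds. A minimal sketch of setting them through Configuration, with the caveat that the key names are the usual HBase settings but should be treated as assumptions and checked against the running version; the values simply mirror the numbers printed above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionServerTuningSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "filesystem");                    // FSHLogProvider, as instantiated above
    conf.setDouble("hbase.regionserver.global.memstore.size", 0.4);  // upper mark, fraction of heap
    conf.setDouble("hbase.regionserver.global.memstore.size.lower.limit", 0.95); // lower mark, fraction of the upper
    // PressureAwareCompactionThroughputController bounds in bytes/second,
    // matching the 100 MB / 50 MB figures logged above (assumed key names).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}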
2024-11-24T16:54:07,700 INFO [RS:0;e335232f7491:42875 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T16:54:07,701 INFO [RS:0;e335232f7491:42875 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T16:54:07,701 INFO [RS:0;e335232f7491:42875 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T16:54:07,701 DEBUG [RS:0;e335232f7491:42875 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:54:07,701 DEBUG [RS:0;e335232f7491:42875 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:54:07,701 DEBUG [RS:0;e335232f7491:42875 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:54:07,701 DEBUG [RS:0;e335232f7491:42875 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:54:07,701 DEBUG [RS:0;e335232f7491:42875 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:54:07,701 DEBUG [RS:0;e335232f7491:42875 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e335232f7491:0, corePoolSize=2, maxPoolSize=2 2024-11-24T16:54:07,701 DEBUG [RS:0;e335232f7491:42875 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:54:07,701 DEBUG [RS:0;e335232f7491:42875 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:54:07,701 DEBUG [RS:0;e335232f7491:42875 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:54:07,701 DEBUG [RS:0;e335232f7491:42875 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:54:07,701 DEBUG [RS:0;e335232f7491:42875 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:54:07,701 DEBUG [RS:0;e335232f7491:42875 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:54:07,701 DEBUG [RS:0;e335232f7491:42875 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e335232f7491:0, corePoolSize=3, maxPoolSize=3 2024-11-24T16:54:07,701 DEBUG [RS:0;e335232f7491:42875 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0, corePoolSize=3, maxPoolSize=3 2024-11-24T16:54:07,702 INFO [RS:0;e335232f7491:42875 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T16:54:07,702 INFO [RS:0;e335232f7491:42875 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T16:54:07,702 INFO [RS:0;e335232f7491:42875 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:54:07,702 INFO [RS:0;e335232f7491:42875 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T16:54:07,702 INFO [RS:0;e335232f7491:42875 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T16:54:07,702 INFO [RS:0;e335232f7491:42875 {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,42875,1732467247444-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T16:54:07,722 INFO [RS:0;e335232f7491:42875 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T16:54:07,722 INFO [RS:0;e335232f7491:42875 {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,42875,1732467247444-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:54:07,722 INFO [RS:0;e335232f7491:42875 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:54:07,722 INFO [RS:0;e335232f7491:42875 {}] regionserver.Replication(171): e335232f7491,42875,1732467247444 started 2024-11-24T16:54:07,740 INFO [RS:0;e335232f7491:42875 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:54:07,740 INFO [RS:0;e335232f7491:42875 {}] regionserver.HRegionServer(1482): Serving as e335232f7491,42875,1732467247444, RpcServer on e335232f7491/172.17.0.2:42875, sessionid=0x1014ae633990001 2024-11-24T16:54:07,740 DEBUG [RS:0;e335232f7491:42875 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T16:54:07,740 DEBUG [RS:0;e335232f7491:42875 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e335232f7491,42875,1732467247444 2024-11-24T16:54:07,740 DEBUG [RS:0;e335232f7491:42875 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e335232f7491,42875,1732467247444' 2024-11-24T16:54:07,740 DEBUG [RS:0;e335232f7491:42875 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T16:54:07,741 DEBUG [RS:0;e335232f7491:42875 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T16:54:07,741 DEBUG [RS:0;e335232f7491:42875 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T16:54:07,741 DEBUG [RS:0;e335232f7491:42875 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T16:54:07,741 DEBUG [RS:0;e335232f7491:42875 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e335232f7491,42875,1732467247444 2024-11-24T16:54:07,741 DEBUG [RS:0;e335232f7491:42875 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e335232f7491,42875,1732467247444' 2024-11-24T16:54:07,741 DEBUG [RS:0;e335232f7491:42875 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T16:54:07,742 DEBUG 
[RS:0;e335232f7491:42875 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T16:54:07,742 DEBUG [RS:0;e335232f7491:42875 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T16:54:07,742 INFO [RS:0;e335232f7491:42875 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T16:54:07,742 INFO [RS:0;e335232f7491:42875 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T16:54:07,827 WARN [e335232f7491:35783 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T16:54:07,844 INFO [RS:0;e335232f7491:42875 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C42875%2C1732467247444, suffix=, logDir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/WALs/e335232f7491,42875,1732467247444, archiveDir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/oldWALs, maxLogs=32 2024-11-24T16:54:07,845 INFO [RS:0;e335232f7491:42875 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C42875%2C1732467247444.1732467247844 2024-11-24T16:54:07,851 INFO [RS:0;e335232f7491:42875 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/WALs/e335232f7491,42875,1732467247444/e335232f7491%2C42875%2C1732467247444.1732467247844 2024-11-24T16:54:07,851 DEBUG [RS:0;e335232f7491:42875 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37491:37491),(127.0.0.1/127.0.0.1:38963:38963)] 2024-11-24T16:54:07,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:07,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:54:08,077 DEBUG [e335232f7491:35783 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T16:54:08,078 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e335232f7491,42875,1732467247444 2024-11-24T16:54:08,079 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e335232f7491,42875,1732467247444, state=OPENING 2024-11-24T16:54:08,082 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T16:54:08,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:54:08,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:54:08,084 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T16:54:08,084 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:54:08,084 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:54:08,084 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e335232f7491,42875,1732467247444}] 2024-11-24T16:54:08,237 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T16:54:08,239 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36459, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T16:54:08,243 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T16:54:08,243 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:54:08,245 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C42875%2C1732467247444.meta, suffix=.meta, logDir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/WALs/e335232f7491,42875,1732467247444, archiveDir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/oldWALs, maxLogs=32 2024-11-24T16:54:08,245 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C42875%2C1732467247444.meta.1732467248245.meta 2024-11-24T16:54:08,250 INFO 
[RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/WALs/e335232f7491,42875,1732467247444/e335232f7491%2C42875%2C1732467247444.meta.1732467248245.meta 2024-11-24T16:54:08,251 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37491:37491),(127.0.0.1/127.0.0.1:38963:38963)] 2024-11-24T16:54:08,252 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:54:08,252 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T16:54:08,252 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T16:54:08,252 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-24T16:54:08,252 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T16:54:08,253 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:54:08,253 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T16:54:08,253 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T16:54:08,254 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T16:54:08,255 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T16:54:08,255 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:08,255 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:54:08,255 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T16:54:08,256 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T16:54:08,256 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:08,257 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:54:08,257 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T16:54:08,257 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T16:54:08,258 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:08,258 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:54:08,258 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T16:54:08,259 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T16:54:08,259 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:08,259 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:54:08,259 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T16:54:08,260 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740 2024-11-24T16:54:08,261 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740 2024-11-24T16:54:08,262 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T16:54:08,262 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T16:54:08,262 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
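Once hbase:meta is opened on the region server as logged above, its location becomes visible to ordinary clients. A hedged client-side sketch using only public client API; the Configuration is assumed to point at this mini cluster's ZooKeeper quorum.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      // For the run logged here this would report e335232f7491,42875,1732467247444.
      System.out.println(loc.getRegion().getRegionNameAsString() + " @ " + loc.getServerName());
    }
  }
}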
2024-11-24T16:54:08,264 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T16:54:08,264 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=695015, jitterRate=-0.11624370515346527}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T16:54:08,264 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T16:54:08,265 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732467248253Writing region info on filesystem at 1732467248253Initializing all the Stores at 1732467248254 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467248254Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467248254Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467248254Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467248254Cleaning up temporary data from old regions at 1732467248262 (+8 ms)Running coprocessor post-open hooks at 1732467248264 (+2 ms)Region opened successfully at 1732467248265 (+1 ms) 2024-11-24T16:54:08,266 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732467248237 2024-11-24T16:54:08,268 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T16:54:08,268 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T16:54:08,269 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=e335232f7491,42875,1732467247444 2024-11-24T16:54:08,270 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e335232f7491,42875,1732467247444, state=OPEN 2024-11-24T16:54:08,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T16:54:08,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T16:54:08,278 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e335232f7491,42875,1732467247444 2024-11-24T16:54:08,278 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:54:08,278 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:54:08,281 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T16:54:08,281 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e335232f7491,42875,1732467247444 in 194 msec 2024-11-24T16:54:08,283 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T16:54:08,283 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 607 msec 2024-11-24T16:54:08,284 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:54:08,284 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T16:54:08,285 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T16:54:08,286 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e335232f7491,42875,1732467247444, seqNum=-1] 2024-11-24T16:54:08,286 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T16:54:08,287 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49893, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T16:54:08,292 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 664 msec 2024-11-24T16:54:08,293 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732467248292, completionTime=-1 2024-11-24T16:54:08,293 INFO 
[master/e335232f7491:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T16:54:08,293 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T16:54:08,295 INFO [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T16:54:08,295 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732467308295 2024-11-24T16:54:08,295 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732467368295 2024-11-24T16:54:08,295 INFO [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-24T16:54:08,295 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,35783,1732467247387-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:54:08,295 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,35783,1732467247387-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:54:08,295 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,35783,1732467247387-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:54:08,295 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e335232f7491:35783, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:54:08,295 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T16:54:08,296 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T16:54:08,297 DEBUG [master/e335232f7491:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T16:54:08,299 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.822sec 2024-11-24T16:54:08,299 INFO [master/e335232f7491:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T16:54:08,299 INFO [master/e335232f7491:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T16:54:08,299 INFO [master/e335232f7491:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T16:54:08,299 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-24T16:54:08,300 INFO [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T16:54:08,300 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,35783,1732467247387-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T16:54:08,300 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,35783,1732467247387-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T16:54:08,302 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T16:54:08,302 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T16:54:08,302 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,35783,1732467247387-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:54:08,360 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2998c4ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:54:08,360 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e335232f7491,35783,-1 for getting cluster id 2024-11-24T16:54:08,361 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T16:54:08,362 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '81de65d6-a575-470d-98e0-4d4dbaaeac9c' 2024-11-24T16:54:08,362 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T16:54:08,362 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "81de65d6-a575-470d-98e0-4d4dbaaeac9c" 2024-11-24T16:54:08,362 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bcfbd03, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:54:08,362 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e335232f7491,35783,-1] 2024-11-24T16:54:08,363 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T16:54:08,363 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:54:08,364 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35854, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T16:54:08,365 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69b834dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:54:08,365 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T16:54:08,366 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e335232f7491,42875,1732467247444, seqNum=-1] 2024-11-24T16:54:08,366 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T16:54:08,367 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32774, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T16:54:08,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e335232f7491,35783,1732467247387 2024-11-24T16:54:08,369 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:54:08,372 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T16:54:08,372 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T16:54:08,373 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is e335232f7491,35783,1732467247387 2024-11-24T16:54:08,373 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7be5d4b3 2024-11-24T16:54:08,373 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T16:54:08,374 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35868, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T16:54:08,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35783 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T16:54:08,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35783 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-24T16:54:08,375 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35783 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T16:54:08,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35783 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-24T16:54:08,377 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T16:54:08,378 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:08,378 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35783 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-24T16:54:08,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35783 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T16:54:08,379 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T16:54:08,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741835_1011 (size=381) 2024-11-24T16:54:08,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741835_1011 (size=381) 2024-11-24T16:54:08,388 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e3ca43f3124bd9e44ae59b22d0fde460, NAME => 'TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682 2024-11-24T16:54:08,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741836_1012 (size=64) 2024-11-24T16:54:08,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741836_1012 (size=64) 2024-11-24T16:54:08,394 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:54:08,394 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing e3ca43f3124bd9e44ae59b22d0fde460, disabling compactions & flushes 2024-11-24T16:54:08,394 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. 2024-11-24T16:54:08,394 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. 2024-11-24T16:54:08,394 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. after waiting 0 ms 2024-11-24T16:54:08,394 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. 2024-11-24T16:54:08,394 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. 2024-11-24T16:54:08,394 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for e3ca43f3124bd9e44ae59b22d0fde460: Waiting for close lock at 1732467248394Disabling compacts and flushes for region at 1732467248394Disabling writes for close at 1732467248394Writing region close event to WAL at 1732467248394Closed at 1732467248394 2024-11-24T16:54:08,396 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T16:54:08,396 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732467248396"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732467248396"}]},"ts":"1732467248396"} 2024-11-24T16:54:08,398 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-24T16:54:08,399 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T16:54:08,400 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732467248399"}]},"ts":"1732467248399"} 2024-11-24T16:54:08,402 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-24T16:54:08,402 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e3ca43f3124bd9e44ae59b22d0fde460, ASSIGN}] 2024-11-24T16:54:08,403 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e3ca43f3124bd9e44ae59b22d0fde460, ASSIGN 2024-11-24T16:54:08,404 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e3ca43f3124bd9e44ae59b22d0fde460, ASSIGN; state=OFFLINE, location=e335232f7491,42875,1732467247444; forceNewPlan=false, retain=false 2024-11-24T16:54:08,555 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e3ca43f3124bd9e44ae59b22d0fde460, regionState=OPENING, regionLocation=e335232f7491,42875,1732467247444 2024-11-24T16:54:08,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e3ca43f3124bd9e44ae59b22d0fde460, ASSIGN because future has completed 2024-11-24T16:54:08,558 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e3ca43f3124bd9e44ae59b22d0fde460, server=e335232f7491,42875,1732467247444}] 2024-11-24T16:54:08,715 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. 
2024-11-24T16:54:08,715 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => e3ca43f3124bd9e44ae59b22d0fde460, NAME => 'TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460.', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:54:08,715 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:08,715 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:54:08,715 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:08,715 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:08,717 INFO [StoreOpener-e3ca43f3124bd9e44ae59b22d0fde460-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:08,718 INFO [StoreOpener-e3ca43f3124bd9e44ae59b22d0fde460-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e3ca43f3124bd9e44ae59b22d0fde460 columnFamilyName info 2024-11-24T16:54:08,718 DEBUG [StoreOpener-e3ca43f3124bd9e44ae59b22d0fde460-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:08,719 INFO [StoreOpener-e3ca43f3124bd9e44ae59b22d0fde460-1 {}] regionserver.HStore(327): Store=e3ca43f3124bd9e44ae59b22d0fde460/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:54:08,719 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:08,719 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:08,720 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:08,720 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:08,720 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:08,722 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:08,724 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:54:08,724 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened e3ca43f3124bd9e44ae59b22d0fde460; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=820268, jitterRate=0.0430254191160202}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T16:54:08,724 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:08,725 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for e3ca43f3124bd9e44ae59b22d0fde460: Running coprocessor pre-open hook at 1732467248715Writing region info on filesystem at 1732467248715Initializing all the Stores at 1732467248716 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467248716Cleaning up temporary data from old regions at 1732467248720 (+4 ms)Running coprocessor post-open hooks at 1732467248724 (+4 ms)Region opened successfully at 1732467248725 (+1 ms) 2024-11-24T16:54:08,726 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460., pid=6, masterSystemTime=1732467248711 2024-11-24T16:54:08,728 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. 
2024-11-24T16:54:08,728 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. 2024-11-24T16:54:08,729 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e3ca43f3124bd9e44ae59b22d0fde460, regionState=OPEN, openSeqNum=2, regionLocation=e335232f7491,42875,1732467247444 2024-11-24T16:54:08,731 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e3ca43f3124bd9e44ae59b22d0fde460, server=e335232f7491,42875,1732467247444 because future has completed 2024-11-24T16:54:08,735 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T16:54:08,735 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure e3ca43f3124bd9e44ae59b22d0fde460, server=e335232f7491,42875,1732467247444 in 174 msec 2024-11-24T16:54:08,737 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T16:54:08,737 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e3ca43f3124bd9e44ae59b22d0fde460, ASSIGN in 333 msec 2024-11-24T16:54:08,738 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T16:54:08,738 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732467248738"}]},"ts":"1732467248738"} 2024-11-24T16:54:08,741 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-24T16:54:08,742 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T16:54:08,744 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 367 msec 2024-11-24T16:54:08,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:08,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:09,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:09,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:10,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:10,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:11,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,046 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,046 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,046 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,553 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T16:54:11,554 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,554 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,554 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,555 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,555 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,555 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,555 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,555 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,573 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:11,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:11,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:12,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:12,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:13,697 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T16:54:13,698 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-24T16:54:13,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:13,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:14,301 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-24T16:54:14,301 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-24T16:54:14,302 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T16:54:14,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:14,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:15,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:15,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:16,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:16,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:17,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:17,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:54:18,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35783 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T16:54:18,472 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-24T16:54:18,472 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-24T16:54:18,475 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-24T16:54:18,475 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. 2024-11-24T16:54:18,478 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460., hostname=e335232f7491,42875,1732467247444, seqNum=2] 2024-11-24T16:54:18,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:18,490 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3ca43f3124bd9e44ae59b22d0fde460 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T16:54:18,510 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/645347b5201c468e9c51be0b8d4f5bbf is 1080, key is row0001/info:/1732467258479/Put/seqid=0 2024-11-24T16:54:18,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741837_1013 (size=12509) 2024-11-24T16:54:18,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741837_1013 (size=12509) 2024-11-24T16:54:18,523 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/645347b5201c468e9c51be0b8d4f5bbf 2024-11-24T16:54:18,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/645347b5201c468e9c51be0b8d4f5bbf as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/645347b5201c468e9c51be0b8d4f5bbf 2024-11-24T16:54:18,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e3ca43f3124bd9e44ae59b22d0fde460, server=e335232f7491,42875,1732467247444 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-24T16:54:18,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:32774 deadline: 1732467268531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e3ca43f3124bd9e44ae59b22d0fde460, server=e335232f7491,42875,1732467247444 2024-11-24T16:54:18,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/645347b5201c468e9c51be0b8d4f5bbf, entries=7, sequenceid=11, filesize=12.2 K 2024-11-24T16:54:18,538 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for e3ca43f3124bd9e44ae59b22d0fde460 in 48ms, sequenceid=11, compaction requested=false 2024-11-24T16:54:18,538 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3ca43f3124bd9e44ae59b22d0fde460: 2024-11-24T16:54:18,564 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460., hostname=e335232f7491,42875,1732467247444, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460., hostname=e335232f7491,42875,1732467247444, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e3ca43f3124bd9e44ae59b22d0fde460, server=e335232f7491,42875,1732467247444 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T16:54:18,565 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460., hostname=e335232f7491,42875,1732467247444, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e3ca43f3124bd9e44ae59b22d0fde460, server=e335232f7491,42875,1732467247444 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T16:54:18,565 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460., hostname=e335232f7491,42875,1732467247444, seqNum=2 because the exception is null or not the one we care about 2024-11-24T16:54:18,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:18,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:19,804 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T16:54:19,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:19,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:19,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:19,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:19,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:19,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:19,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:19,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:19,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:19,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:19,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:19,827 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:19,827 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:19,827 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:19,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:19,831 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:19,831 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:19,833 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:19,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:19,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:20,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:20,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:21,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:21,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:22,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:22,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
2024-11-24T16:54:23,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null
2024-11-24T16:54:23,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null
2024-11-24T16:54:24,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null
2024-11-24T16:54:24,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null
2024-11-24T16:54:25,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null
2024-11-24T16:54:25,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null
2024-11-24T16:54:26,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null
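The timestamps show the same probe being retried roughly once per second per WAL file until the close either succeeds or is abandoned. Below is a rough, self-contained sketch of that retry loop; the one-second interval comment matches the spacing seen here, but the timeout parameter and the Supplier-based probe are illustrative assumptions, not values or signatures taken from RecoverLeaseFSUtils.

import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

// Rough sketch of the retry cadence visible above: poll an "is the file
// closed yet?" probe about once per second until it reports true or a
// deadline passes. The probe is passed in as a Supplier so the sketch stays
// self-contained; the interval and timeout are illustrative assumptions.
public final class LeaseRecoveryPollSketch {
  private LeaseRecoveryPollSketch() {
  }

  public static boolean waitUntilClosed(Supplier<Boolean> isClosedProbe, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (Boolean.TRUE.equals(isClosedProbe.get())) {
        return true; // lease recovered; the WAL file is closed
      }
      TimeUnit.SECONDS.sleep(1); // matches the ~1s spacing of the WARN entries
    }
    return false; // caller decides whether to keep waiting or give up
  }
}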
2024-11-24T16:54:26,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null
2024-11-24T16:54:27,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null
2024-11-24T16:54:27,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:28,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:28,664 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3ca43f3124bd9e44ae59b22d0fde460 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-24T16:54:28,669 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/f1f9358cddf947cfa596ba0c24a97e27 is 1080, key is row0008/info:/1732467258491/Put/seqid=0 2024-11-24T16:54:28,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741838_1014 (size=29761) 2024-11-24T16:54:28,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741838_1014 (size=29761) 2024-11-24T16:54:28,682 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/f1f9358cddf947cfa596ba0c24a97e27 2024-11-24T16:54:28,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/f1f9358cddf947cfa596ba0c24a97e27 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/f1f9358cddf947cfa596ba0c24a97e27 2024-11-24T16:54:28,694 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/f1f9358cddf947cfa596ba0c24a97e27, entries=23, sequenceid=37, filesize=29.1 K 2024-11-24T16:54:28,695 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for e3ca43f3124bd9e44ae59b22d0fde460 in 32ms, sequenceid=37, compaction requested=false 2024-11-24T16:54:28,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3ca43f3124bd9e44ae59b22d0fde460: 2024-11-24T16:54:28,695 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-24T16:54:28,695 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:54:28,696 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/f1f9358cddf947cfa596ba0c24a97e27 because midkey is the same as first or last row
2024-11-24T16:54:28,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null
2024-11-24T16:54:28,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null
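The StoreUtils(137) entry above records why the flushed region is not split even though the size check passed: the largest store file's midkey equals its first or last row, so no usable split point exists. The sketch below is a simplified illustration of that decision only; the byte[]-based signature and class name are assumptions, not the HBase StoreUtils API.

import java.util.Arrays;
import java.util.Optional;

// Simplified illustration of the "cannot split ... because midkey is the same
// as first or last row" decision: a store file only yields a usable split
// point when its midkey differs from both its first and last keys; otherwise
// the data is concentrated on a single row boundary and the region stays whole.
public final class SplitPointSketch {
  private SplitPointSketch() {
  }

  public static Optional<byte[]> splitPoint(byte[] firstKey, byte[] midKey, byte[] lastKey) {
    if (Arrays.equals(midKey, firstKey) || Arrays.equals(midKey, lastKey)) {
      return Optional.empty(); // no usable split point; log "cannot split"
    }
    return Optional.of(midKey);
  }
}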
2024-11-24T16:54:29,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null
2024-11-24T16:54:29,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-24T16:54:30,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:30,678 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3ca43f3124bd9e44ae59b22d0fde460 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T16:54:30,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/ffe345ad7899457fb450f71f77a69f99 is 1080, key is row0031/info:/1732467268665/Put/seqid=0 2024-11-24T16:54:30,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741839_1015 (size=12509) 2024-11-24T16:54:30,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741839_1015 (size=12509) 2024-11-24T16:54:30,694 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/ffe345ad7899457fb450f71f77a69f99 2024-11-24T16:54:30,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/ffe345ad7899457fb450f71f77a69f99 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/ffe345ad7899457fb450f71f77a69f99 2024-11-24T16:54:30,706 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/ffe345ad7899457fb450f71f77a69f99, entries=7, sequenceid=47, filesize=12.2 K 2024-11-24T16:54:30,707 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for e3ca43f3124bd9e44ae59b22d0fde460 in 29ms, sequenceid=47, compaction requested=true 2024-11-24T16:54:30,707 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3ca43f3124bd9e44ae59b22d0fde460: 2024-11-24T16:54:30,707 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-24T16:54:30,707 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:54:30,707 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/f1f9358cddf947cfa596ba0c24a97e27 because midkey is the same as first or last row 2024-11-24T16:54:30,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:30,708 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3ca43f3124bd9e44ae59b22d0fde460:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T16:54:30,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:54:30,708 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T16:54:30,708 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3ca43f3124bd9e44ae59b22d0fde460 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-24T16:54:30,709 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T16:54:30,709 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1541): e3ca43f3124bd9e44ae59b22d0fde460/info is initiating minor compaction (all files) 2024-11-24T16:54:30,709 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e3ca43f3124bd9e44ae59b22d0fde460/info in TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. 2024-11-24T16:54:30,709 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/645347b5201c468e9c51be0b8d4f5bbf, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/f1f9358cddf947cfa596ba0c24a97e27, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/ffe345ad7899457fb450f71f77a69f99] into tmpdir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp, totalSize=53.5 K 2024-11-24T16:54:30,710 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting 645347b5201c468e9c51be0b8d4f5bbf, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732467258479 2024-11-24T16:54:30,710 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting f1f9358cddf947cfa596ba0c24a97e27, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732467258491 2024-11-24T16:54:30,711 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting ffe345ad7899457fb450f71f77a69f99, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732467268665 2024-11-24T16:54:30,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/424be6fb3f5f42edab5cda05478cb442 is 1080, key is row0038/info:/1732467270679/Put/seqid=0 
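The ExploringCompactionPolicy entry above reports that 3 eligible store files totalling 54779 bytes were selected "with 1 in ratio". As a rough, hedged illustration of what a ratio-based minor-compaction selection of that kind amounts to, the sketch below picks a window of files whose sizes are mutually similar; the record type, the windowing and the ratio parameter are assumptions for illustration, not the HBase implementation.

import java.util.ArrayList;
import java.util.List;

// Simplified, illustrative take on ratio-based minor-compaction selection in
// the spirit of the ExploringCompactionPolicy decision logged above. Not the
// HBase code: the file model and the ratio handling are assumptions.
public final class CompactionSelectionSketch {
  public record StoreFileInfo(String name, long sizeBytes) {
  }

  private CompactionSelectionSketch() {
  }

  /** Pick the first window (oldest first) whose files are mutually similar in size. */
  public static List<StoreFileInfo> select(List<StoreFileInfo> files, int minFiles, double ratio) {
    for (int start = 0; start + minFiles <= files.size(); start++) {
      for (int end = files.size(); end - start >= minFiles; end--) {
        List<StoreFileInfo> window = files.subList(start, end);
        long total = window.stream().mapToLong(StoreFileInfo::sizeBytes).sum();
        boolean sizesAreSimilar = window.stream()
            .allMatch(f -> f.sizeBytes() <= ratio * (total - f.sizeBytes()));
        if (sizesAreSimilar) {
          return new ArrayList<>(window); // e.g. the 12.2 K + 29.1 K + 12.2 K files above
        }
      }
    }
    return List.of(); // nothing eligible yet; wait for more flushes
  }
}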
2024-11-24T16:54:30,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741840_1016 (size=23299) 2024-11-24T16:54:30,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741840_1016 (size=23299) 2024-11-24T16:54:30,723 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=67 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/424be6fb3f5f42edab5cda05478cb442 2024-11-24T16:54:30,727 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3ca43f3124bd9e44ae59b22d0fde460#info#compaction#59 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T16:54:30,728 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/8e84d2704efc471da649f7efe2c4e6a9 is 1080, key is row0001/info:/1732467258479/Put/seqid=0 2024-11-24T16:54:30,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/424be6fb3f5f42edab5cda05478cb442 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/424be6fb3f5f42edab5cda05478cb442 2024-11-24T16:54:30,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741841_1017 (size=44978) 2024-11-24T16:54:30,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741841_1017 (size=44978) 2024-11-24T16:54:30,738 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/424be6fb3f5f42edab5cda05478cb442, entries=17, sequenceid=67, filesize=22.8 K 2024-11-24T16:54:30,740 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=10.51 KB/10760 for e3ca43f3124bd9e44ae59b22d0fde460 in 31ms, sequenceid=67, compaction requested=false 2024-11-24T16:54:30,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3ca43f3124bd9e44ae59b22d0fde460: 2024-11-24T16:54:30,740 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=76.2 K, sizeToCheck=16.0 K 2024-11-24T16:54:30,740 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:54:30,740 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/f1f9358cddf947cfa596ba0c24a97e27 because midkey is the same as first or last row 2024-11-24T16:54:30,741 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/8e84d2704efc471da649f7efe2c4e6a9 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/8e84d2704efc471da649f7efe2c4e6a9 2024-11-24T16:54:30,748 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e3ca43f3124bd9e44ae59b22d0fde460/info of e3ca43f3124bd9e44ae59b22d0fde460 into 8e84d2704efc471da649f7efe2c4e6a9(size=43.9 K), total size for store is 66.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T16:54:30,748 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e3ca43f3124bd9e44ae59b22d0fde460: 2024-11-24T16:54:30,748 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460., storeName=e3ca43f3124bd9e44ae59b22d0fde460/info, priority=13, startTime=1732467270707; duration=0sec 2024-11-24T16:54:30,748 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.7 K, sizeToCheck=16.0 K 2024-11-24T16:54:30,748 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:54:30,748 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/8e84d2704efc471da649f7efe2c4e6a9 because midkey is the same as first or last row 2024-11-24T16:54:30,749 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.7 K, sizeToCheck=16.0 K 2024-11-24T16:54:30,749 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:54:30,749 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/8e84d2704efc471da649f7efe2c4e6a9 because midkey is the same as first or last row 2024-11-24T16:54:30,749 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.7 K, sizeToCheck=16.0 K 2024-11-24T16:54:30,749 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:54:30,749 DEBUG 
[RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/8e84d2704efc471da649f7efe2c4e6a9 because midkey is the same as first or last row 2024-11-24T16:54:30,749 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:54:30,749 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3ca43f3124bd9e44ae59b22d0fde460:info 2024-11-24T16:54:30,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:30,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:31,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:31,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:54:32,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:32,733 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3ca43f3124bd9e44ae59b22d0fde460 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T16:54:32,738 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/919f0afa46894040ad401aed77e315dd is 1080, key is row0055/info:/1732467270708/Put/seqid=0 2024-11-24T16:54:32,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741842_1018 (size=16817) 2024-11-24T16:54:32,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741842_1018 (size=16817) 2024-11-24T16:54:32,745 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/919f0afa46894040ad401aed77e315dd 2024-11-24T16:54:32,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/919f0afa46894040ad401aed77e315dd as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/919f0afa46894040ad401aed77e315dd 2024-11-24T16:54:32,757 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/919f0afa46894040ad401aed77e315dd, entries=11, sequenceid=82, filesize=16.4 K 2024-11-24T16:54:32,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=12.61 KB/12912 for e3ca43f3124bd9e44ae59b22d0fde460 in 25ms, sequenceid=82, compaction requested=true 2024-11-24T16:54:32,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3ca43f3124bd9e44ae59b22d0fde460: 2024-11-24T16:54:32,759 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-24T16:54:32,759 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:54:32,759 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/8e84d2704efc471da649f7efe2c4e6a9 because midkey is the same as first or last row 2024-11-24T16:54:32,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3ca43f3124bd9e44ae59b22d0fde460:info, priority=-2147483648, current under 
compaction store size is 1 2024-11-24T16:54:32,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:54:32,759 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T16:54:32,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:32,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3ca43f3124bd9e44ae59b22d0fde460 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-24T16:54:32,760 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T16:54:32,760 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1541): e3ca43f3124bd9e44ae59b22d0fde460/info is initiating minor compaction (all files) 2024-11-24T16:54:32,760 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e3ca43f3124bd9e44ae59b22d0fde460/info in TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. 2024-11-24T16:54:32,760 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/8e84d2704efc471da649f7efe2c4e6a9, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/424be6fb3f5f42edab5cda05478cb442, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/919f0afa46894040ad401aed77e315dd] into tmpdir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp, totalSize=83.1 K 2024-11-24T16:54:32,761 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8e84d2704efc471da649f7efe2c4e6a9, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732467258479 2024-11-24T16:54:32,762 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting 424be6fb3f5f42edab5cda05478cb442, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=67, earliestPutTs=1732467270679 2024-11-24T16:54:32,762 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting 919f0afa46894040ad401aed77e315dd, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732467270708 2024-11-24T16:54:32,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/f8eed75c65454bc4b9b194c983b6fa00 is 1080, key is 
row0066/info:/1732467272735/Put/seqid=0 2024-11-24T16:54:32,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741843_1019 (size=18987) 2024-11-24T16:54:32,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741843_1019 (size=18987) 2024-11-24T16:54:32,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=98 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/f8eed75c65454bc4b9b194c983b6fa00 2024-11-24T16:54:32,782 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3ca43f3124bd9e44ae59b22d0fde460#info#compaction#62 average throughput is 8.34 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T16:54:32,783 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/7e5e28a7334846a9a5b63f127ba2b343 is 1080, key is row0001/info:/1732467258479/Put/seqid=0 2024-11-24T16:54:32,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/f8eed75c65454bc4b9b194c983b6fa00 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/f8eed75c65454bc4b9b194c983b6fa00 2024-11-24T16:54:32,791 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/f8eed75c65454bc4b9b194c983b6fa00, entries=13, sequenceid=98, filesize=18.5 K 2024-11-24T16:54:32,792 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=15.76 KB/16140 for e3ca43f3124bd9e44ae59b22d0fde460 in 32ms, sequenceid=98, compaction requested=false 2024-11-24T16:54:32,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3ca43f3124bd9e44ae59b22d0fde460: 2024-11-24T16:54:32,792 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=101.6 K, sizeToCheck=16.0 K 2024-11-24T16:54:32,792 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:54:32,792 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/8e84d2704efc471da649f7efe2c4e6a9 because midkey is the same as first or last row 2024-11-24T16:54:32,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on 
e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:32,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3ca43f3124bd9e44ae59b22d0fde460 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-24T16:54:32,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741844_1020 (size=75378) 2024-11-24T16:54:32,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741844_1020 (size=75378) 2024-11-24T16:54:32,800 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/b05046857f9d4081b75c3401e51d47c8 is 1080, key is row0079/info:/1732467272761/Put/seqid=0 2024-11-24T16:54:32,804 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/7e5e28a7334846a9a5b63f127ba2b343 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/7e5e28a7334846a9a5b63f127ba2b343 2024-11-24T16:54:32,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741845_1021 (size=22222) 2024-11-24T16:54:32,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741845_1021 (size=22222) 2024-11-24T16:54:32,807 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/b05046857f9d4081b75c3401e51d47c8 2024-11-24T16:54:32,811 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e3ca43f3124bd9e44ae59b22d0fde460/info of e3ca43f3124bd9e44ae59b22d0fde460 into 7e5e28a7334846a9a5b63f127ba2b343(size=73.6 K), total size for store is 92.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-24T16:54:32,811 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e3ca43f3124bd9e44ae59b22d0fde460: 2024-11-24T16:54:32,811 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460., storeName=e3ca43f3124bd9e44ae59b22d0fde460/info, priority=13, startTime=1732467272759; duration=0sec 2024-11-24T16:54:32,811 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=92.2 K, sizeToCheck=16.0 K 2024-11-24T16:54:32,811 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:54:32,811 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=92.2 K, sizeToCheck=16.0 K 2024-11-24T16:54:32,811 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:54:32,811 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=92.2 K, sizeToCheck=16.0 K 2024-11-24T16:54:32,811 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:54:32,813 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:54:32,813 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:54:32,813 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3ca43f3124bd9e44ae59b22d0fde460:info 2024-11-24T16:54:32,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/b05046857f9d4081b75c3401e51d47c8 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/b05046857f9d4081b75c3401e51d47c8 2024-11-24T16:54:32,814 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35783 {}] assignment.AssignmentManager(1363): Split request from e335232f7491,42875,1732467247444, parent={ENCODED => e3ca43f3124bd9e44ae59b22d0fde460, NAME => 'TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-24T16:54:32,820 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/b05046857f9d4081b75c3401e51d47c8, entries=16, sequenceid=117, filesize=21.7 K 2024-11-24T16:54:32,820 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35783 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=e335232f7491,42875,1732467247444 2024-11-24T16:54:32,821 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=2.10 KB/2152 for e3ca43f3124bd9e44ae59b22d0fde460 in 28ms, sequenceid=117, compaction requested=true 2024-11-24T16:54:32,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3ca43f3124bd9e44ae59b22d0fde460: 2024-11-24T16:54:32,821 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=113.9 K, sizeToCheck=16.0 K 2024-11-24T16:54:32,821 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:54:32,822 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=113.9 K, sizeToCheck=16.0 K 2024-11-24T16:54:32,822 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:54:32,822 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=113.9 K, sizeToCheck=16.0 K 2024-11-24T16:54:32,822 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T16:54:32,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1 2024-11-24T16:54:32,825 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35783 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e3ca43f3124bd9e44ae59b22d0fde460, daughterA=a77fea3a556068705b99707d91aa713b, daughterB=b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:32,826 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e3ca43f3124bd9e44ae59b22d0fde460, daughterA=a77fea3a556068705b99707d91aa713b, daughterB=b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:32,827 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e3ca43f3124bd9e44ae59b22d0fde460, daughterA=a77fea3a556068705b99707d91aa713b, daughterB=b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:32,827 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e3ca43f3124bd9e44ae59b22d0fde460, daughterA=a77fea3a556068705b99707d91aa713b, daughterB=b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:32,828 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35783 {}] assignment.AssignmentManager(1363): Split request from e335232f7491,42875,1732467247444, parent={ENCODED => e3ca43f3124bd9e44ae59b22d0fde460, NAME => 
'TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-24T16:54:32,829 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35783 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=SPLITTING, location=e335232f7491,42875,1732467247444 2024-11-24T16:54:32,830 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35783 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e3ca43f3124bd9e44ae59b22d0fde460, daughterA=1ca357fae8f9bdce482e9481e0a54030, daughterB=aee506da2524d1dbb5fefc576cf27278 2024-11-24T16:54:32,830 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(842): Waiting on xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e3ca43f3124bd9e44ae59b22d0fde460, daughterA=1ca357fae8f9bdce482e9481e0a54030, daughterB=aee506da2524d1dbb5fefc576cf27278 held by pid=7 2024-11-24T16:54:32,835 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e3ca43f3124bd9e44ae59b22d0fde460, UNASSIGN}] 2024-11-24T16:54:32,838 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(162): LOCK_EVENT_WAIT SchemaLocking[serverLocks={},namespaceLocks={hbase=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},tableLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},regionLocks={},peerLocks={},metaLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},globalLocks={}] 2024-11-24T16:54:32,838 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e3ca43f3124bd9e44ae59b22d0fde460, UNASSIGN 2024-11-24T16:54:32,838 DEBUG [PEWorker-3 {}] procedure2.ProcedureExecutor(1511): LOCK_EVENT_WAIT pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e3ca43f3124bd9e44ae59b22d0fde460, daughterA=1ca357fae8f9bdce482e9481e0a54030, daughterB=aee506da2524d1dbb5fefc576cf27278 2024-11-24T16:54:32,840 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=e3ca43f3124bd9e44ae59b22d0fde460, regionState=CLOSING, regionLocation=e335232f7491,42875,1732467247444 2024-11-24T16:54:32,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e3ca43f3124bd9e44ae59b22d0fde460, UNASSIGN because future has completed 2024-11-24T16:54:32,843 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-24T16:54:32,843 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure e3ca43f3124bd9e44ae59b22d0fde460, server=e335232f7491,42875,1732467247444}] 2024-11-24T16:54:32,911 WARN 
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:32,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:33,001 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(122): Close e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:33,001 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-24T16:54:33,001 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1722): Closing e3ca43f3124bd9e44ae59b22d0fde460, disabling compactions & flushes 2024-11-24T16:54:33,001 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. 2024-11-24T16:54:33,001 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. 2024-11-24T16:54:33,001 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. after waiting 0 ms 2024-11-24T16:54:33,001 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. 
2024-11-24T16:54:33,002 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(2902): Flushing e3ca43f3124bd9e44ae59b22d0fde460 1/1 column families, dataSize=2.10 KB heapSize=2.50 KB 2024-11-24T16:54:33,006 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/a8380a2054e74188bc0faa3add0b983b is 1080, key is row0095/info:/1732467272795/Put/seqid=0 2024-11-24T16:54:33,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741846_1022 (size=7112) 2024-11-24T16:54:33,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741846_1022 (size=7112) 2024-11-24T16:54:33,012 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.10 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/a8380a2054e74188bc0faa3add0b983b 2024-11-24T16:54:33,018 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/.tmp/info/a8380a2054e74188bc0faa3add0b983b as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/a8380a2054e74188bc0faa3add0b983b 2024-11-24T16:54:33,023 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/a8380a2054e74188bc0faa3add0b983b, entries=2, sequenceid=123, filesize=6.9 K 2024-11-24T16:54:33,024 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for e3ca43f3124bd9e44ae59b22d0fde460 in 22ms, sequenceid=123, compaction requested=true 2024-11-24T16:54:33,025 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/645347b5201c468e9c51be0b8d4f5bbf, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/f1f9358cddf947cfa596ba0c24a97e27, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/8e84d2704efc471da649f7efe2c4e6a9, 
hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/ffe345ad7899457fb450f71f77a69f99, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/424be6fb3f5f42edab5cda05478cb442, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/919f0afa46894040ad401aed77e315dd] to archive 2024-11-24T16:54:33,026 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-24T16:54:33,028 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/645347b5201c468e9c51be0b8d4f5bbf to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/645347b5201c468e9c51be0b8d4f5bbf 2024-11-24T16:54:33,029 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/f1f9358cddf947cfa596ba0c24a97e27 to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/f1f9358cddf947cfa596ba0c24a97e27 2024-11-24T16:54:33,031 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/8e84d2704efc471da649f7efe2c4e6a9 to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/8e84d2704efc471da649f7efe2c4e6a9 2024-11-24T16:54:33,032 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/ffe345ad7899457fb450f71f77a69f99 to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/ffe345ad7899457fb450f71f77a69f99 2024-11-24T16:54:33,033 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/424be6fb3f5f42edab5cda05478cb442 to 
hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/424be6fb3f5f42edab5cda05478cb442 2024-11-24T16:54:33,034 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/919f0afa46894040ad401aed77e315dd to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/919f0afa46894040ad401aed77e315dd 2024-11-24T16:54:33,041 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-24T16:54:33,042 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. 2024-11-24T16:54:33,042 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1676): Region close journal for e3ca43f3124bd9e44ae59b22d0fde460: Waiting for close lock at 1732467273001Running coprocessor pre-close hooks at 1732467273001Disabling compacts and flushes for region at 1732467273001Disabling writes for close at 1732467273001Obtaining lock to block concurrent updates at 1732467273002 (+1 ms)Preparing flush snapshotting stores in e3ca43f3124bd9e44ae59b22d0fde460 at 1732467273002Finished memstore snapshotting TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460., syncing WAL and waiting on mvcc, flushsize=dataSize=2152, getHeapSize=2544, getOffHeapSize=0, getCellsCount=2 at 1732467273002Flushing stores of TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. 
at 1732467273003 (+1 ms)Flushing e3ca43f3124bd9e44ae59b22d0fde460/info: creating writer at 1732467273003Flushing e3ca43f3124bd9e44ae59b22d0fde460/info: appending metadata at 1732467273006 (+3 ms)Flushing e3ca43f3124bd9e44ae59b22d0fde460/info: closing flushed file at 1732467273006Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28bc7500: reopening flushed file at 1732467273017 (+11 ms)Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for e3ca43f3124bd9e44ae59b22d0fde460 in 22ms, sequenceid=123, compaction requested=true at 1732467273024 (+7 ms)Writing region close event to WAL at 1732467273037 (+13 ms)Running coprocessor post-close hooks at 1732467273041 (+4 ms)Closed at 1732467273041 2024-11-24T16:54:33,044 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(157): Closed e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:33,045 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=e3ca43f3124bd9e44ae59b22d0fde460, regionState=CLOSED 2024-11-24T16:54:33,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure e3ca43f3124bd9e44ae59b22d0fde460, server=e335232f7491,42875,1732467247444 because future has completed 2024-11-24T16:54:33,050 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-24T16:54:33,050 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; CloseRegionProcedure e3ca43f3124bd9e44ae59b22d0fde460, server=e335232f7491,42875,1732467247444 in 205 msec 2024-11-24T16:54:33,053 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-24T16:54:33,053 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e3ca43f3124bd9e44ae59b22d0fde460, UNASSIGN in 215 msec 2024-11-24T16:54:33,061 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:33,065 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=e3ca43f3124bd9e44ae59b22d0fde460, threads=4 2024-11-24T16:54:33,067 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/b05046857f9d4081b75c3401e51d47c8 for region: e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:33,067 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/7e5e28a7334846a9a5b63f127ba2b343 for region: e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:33,067 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/a8380a2054e74188bc0faa3add0b983b for region: e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:33,068 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/f8eed75c65454bc4b9b194c983b6fa00 for region: e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:33,078 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/b05046857f9d4081b75c3401e51d47c8, top=true 2024-11-24T16:54:33,080 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/a8380a2054e74188bc0faa3add0b983b, top=true 2024-11-24T16:54:33,081 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/f8eed75c65454bc4b9b194c983b6fa00, top=true 2024-11-24T16:54:33,091 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-b05046857f9d4081b75c3401e51d47c8 for child: b027cc4d524f38ce823f856963ba6e3a, parent: e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:33,091 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-f8eed75c65454bc4b9b194c983b6fa00 for child: b027cc4d524f38ce823f856963ba6e3a, parent: e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:33,091 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/b05046857f9d4081b75c3401e51d47c8 for region: e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:33,092 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/f8eed75c65454bc4b9b194c983b6fa00 for region: e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:33,092 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created 
linkFile:hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-a8380a2054e74188bc0faa3add0b983b for child: b027cc4d524f38ce823f856963ba6e3a, parent: e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:33,092 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/a8380a2054e74188bc0faa3add0b983b for region: e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:33,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741847_1023 (size=27) 2024-11-24T16:54:33,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741847_1023 (size=27) 2024-11-24T16:54:33,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741848_1024 (size=27) 2024-11-24T16:54:33,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741848_1024 (size=27) 2024-11-24T16:54:33,103 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/7e5e28a7334846a9a5b63f127ba2b343 for region: e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:54:33,106 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region e3ca43f3124bd9e44ae59b22d0fde460 Daughter A: [hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/a77fea3a556068705b99707d91aa713b/info/7e5e28a7334846a9a5b63f127ba2b343.e3ca43f3124bd9e44ae59b22d0fde460] storefiles, Daughter B: [hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/7e5e28a7334846a9a5b63f127ba2b343.e3ca43f3124bd9e44ae59b22d0fde460, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-a8380a2054e74188bc0faa3add0b983b, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-b05046857f9d4081b75c3401e51d47c8, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-f8eed75c65454bc4b9b194c983b6fa00] storefiles. 
2024-11-24T16:54:33,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741849_1025 (size=71) 2024-11-24T16:54:33,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741849_1025 (size=71) 2024-11-24T16:54:33,118 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:33,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741850_1026 (size=71) 2024-11-24T16:54:33,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741850_1026 (size=71) 2024-11-24T16:54:33,133 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:33,144 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/a77fea3a556068705b99707d91aa713b/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-24T16:54:33,156 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-24T16:54:33,159 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732467273159"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732467273159"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732467273159"}]},"ts":"1732467273159"} 2024-11-24T16:54:33,160 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732467273159"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732467273159"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732467273159"}]},"ts":"1732467273159"} 2024-11-24T16:54:33,160 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732467273159"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732467273159"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732467273159"}]},"ts":"1732467273159"} 2024-11-24T16:54:33,182 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a77fea3a556068705b99707d91aa713b, ASSIGN}, {pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=b027cc4d524f38ce823f856963ba6e3a, ASSIGN}] 2024-11-24T16:54:33,183 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a77fea3a556068705b99707d91aa713b, ASSIGN 2024-11-24T16:54:33,184 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b027cc4d524f38ce823f856963ba6e3a, ASSIGN 2024-11-24T16:54:33,184 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a77fea3a556068705b99707d91aa713b, ASSIGN; state=SPLITTING_NEW, location=e335232f7491,42875,1732467247444; forceNewPlan=false, retain=false 2024-11-24T16:54:33,185 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b027cc4d524f38ce823f856963ba6e3a, ASSIGN; state=SPLITTING_NEW, location=e335232f7491,42875,1732467247444; forceNewPlan=false, retain=false 2024-11-24T16:54:33,335 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=a77fea3a556068705b99707d91aa713b, regionState=OPENING, regionLocation=e335232f7491,42875,1732467247444 2024-11-24T16:54:33,335 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=b027cc4d524f38ce823f856963ba6e3a, regionState=OPENING, regionLocation=e335232f7491,42875,1732467247444 2024-11-24T16:54:33,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a77fea3a556068705b99707d91aa713b, ASSIGN because future has completed 2024-11-24T16:54:33,338 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure a77fea3a556068705b99707d91aa713b, server=e335232f7491,42875,1732467247444}] 2024-11-24T16:54:33,339 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b027cc4d524f38ce823f856963ba6e3a, ASSIGN because future has completed 2024-11-24T16:54:33,339 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444}] 2024-11-24T16:54:33,494 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a. 
2024-11-24T16:54:33,495 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7752): Opening region: {ENCODED => b027cc4d524f38ce823f856963ba6e3a, NAME => 'TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-24T16:54:33,495 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:33,495 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:54:33,495 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7794): checking encryption for b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:33,495 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7797): checking classloading for b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:33,496 INFO [StoreOpener-b027cc4d524f38ce823f856963ba6e3a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:33,497 INFO [StoreOpener-b027cc4d524f38ce823f856963ba6e3a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b027cc4d524f38ce823f856963ba6e3a columnFamilyName info 2024-11-24T16:54:33,497 DEBUG [StoreOpener-b027cc4d524f38ce823f856963ba6e3a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:33,507 DEBUG [StoreOpener-b027cc4d524f38ce823f856963ba6e3a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/7e5e28a7334846a9a5b63f127ba2b343.e3ca43f3124bd9e44ae59b22d0fde460->hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/7e5e28a7334846a9a5b63f127ba2b343-top 2024-11-24T16:54:33,512 DEBUG [StoreOpener-b027cc4d524f38ce823f856963ba6e3a-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-a8380a2054e74188bc0faa3add0b983b 2024-11-24T16:54:33,516 DEBUG [StoreOpener-b027cc4d524f38ce823f856963ba6e3a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-b05046857f9d4081b75c3401e51d47c8 2024-11-24T16:54:33,520 DEBUG [StoreOpener-b027cc4d524f38ce823f856963ba6e3a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-f8eed75c65454bc4b9b194c983b6fa00 2024-11-24T16:54:33,521 INFO [StoreOpener-b027cc4d524f38ce823f856963ba6e3a-1 {}] regionserver.HStore(327): Store=b027cc4d524f38ce823f856963ba6e3a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:54:33,521 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1038): replaying wal for b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:33,522 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:33,523 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:33,523 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1048): stopping wal replay for b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:33,523 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1060): Cleaning up temporary data for b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:33,525 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1093): writing seq id for b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:33,526 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1114): Opened b027cc4d524f38ce823f856963ba6e3a; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708438, jitterRate=-0.09917497634887695}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T16:54:33,526 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:33,527 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] 
regionserver.HRegion(1006): Region open journal for b027cc4d524f38ce823f856963ba6e3a: Running coprocessor pre-open hook at 1732467273495Writing region info on filesystem at 1732467273495Initializing all the Stores at 1732467273496 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467273496Cleaning up temporary data from old regions at 1732467273523 (+27 ms)Running coprocessor post-open hooks at 1732467273526 (+3 ms)Region opened successfully at 1732467273527 (+1 ms) 2024-11-24T16:54:33,528 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., pid=14, masterSystemTime=1732467273490 2024-11-24T16:54:33,528 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(403): Add compact mark for store b027cc4d524f38ce823f856963ba6e3a:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T16:54:33,528 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-24T16:54:33,528 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:54:33,529 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a. 2024-11-24T16:54:33,530 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1541): b027cc4d524f38ce823f856963ba6e3a/info is initiating minor compaction (all files) 2024-11-24T16:54:33,530 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b027cc4d524f38ce823f856963ba6e3a/info in TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a. 
2024-11-24T16:54:33,530 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/7e5e28a7334846a9a5b63f127ba2b343.e3ca43f3124bd9e44ae59b22d0fde460->hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/7e5e28a7334846a9a5b63f127ba2b343-top, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-f8eed75c65454bc4b9b194c983b6fa00, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-b05046857f9d4081b75c3401e51d47c8, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-a8380a2054e74188bc0faa3add0b983b] into tmpdir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp, totalSize=120.8 K 2024-11-24T16:54:33,530 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a. 2024-11-24T16:54:33,530 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a. 2024-11-24T16:54:33,530 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7e5e28a7334846a9a5b63f127ba2b343.e3ca43f3124bd9e44ae59b22d0fde460, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1732467258479 2024-11-24T16:54:33,530 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b. 
2024-11-24T16:54:33,531 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => a77fea3a556068705b99707d91aa713b, NAME => 'TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-24T16:54:33,531 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-f8eed75c65454bc4b9b194c983b6fa00, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1732467272735 2024-11-24T16:54:33,531 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling a77fea3a556068705b99707d91aa713b 2024-11-24T16:54:33,531 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:54:33,531 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=b027cc4d524f38ce823f856963ba6e3a, regionState=OPEN, openSeqNum=127, regionLocation=e335232f7491,42875,1732467247444 2024-11-24T16:54:33,531 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for a77fea3a556068705b99707d91aa713b 2024-11-24T16:54:33,531 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for a77fea3a556068705b99707d91aa713b 2024-11-24T16:54:33,531 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-b05046857f9d4081b75c3401e51d47c8, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732467272761 2024-11-24T16:54:33,532 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-a8380a2054e74188bc0faa3add0b983b, keycount=2, bloomtype=ROW, size=6.9 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732467272795 2024-11-24T16:54:33,533 INFO [StoreOpener-a77fea3a556068705b99707d91aa713b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a77fea3a556068705b99707d91aa713b 2024-11-24T16:54:33,533 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-24T16:54:33,533 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-24T16:54:33,534 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-24T16:54:33,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444 because future has completed 2024-11-24T16:54:33,534 INFO [StoreOpener-a77fea3a556068705b99707d91aa713b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a77fea3a556068705b99707d91aa713b columnFamilyName info 2024-11-24T16:54:33,534 DEBUG [StoreOpener-a77fea3a556068705b99707d91aa713b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:54:33,538 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-11-24T16:54:33,538 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; OpenRegionProcedure b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444 in 196 msec 2024-11-24T16:54:33,540 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b027cc4d524f38ce823f856963ba6e3a, ASSIGN in 356 msec 2024-11-24T16:54:33,551 DEBUG [StoreOpener-a77fea3a556068705b99707d91aa713b-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/a77fea3a556068705b99707d91aa713b/info/7e5e28a7334846a9a5b63f127ba2b343.e3ca43f3124bd9e44ae59b22d0fde460->hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/7e5e28a7334846a9a5b63f127ba2b343-bottom 2024-11-24T16:54:33,551 INFO [StoreOpener-a77fea3a556068705b99707d91aa713b-1 {}] regionserver.HStore(327): Store=a77fea3a556068705b99707d91aa713b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:54:33,551 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for a77fea3a556068705b99707d91aa713b 2024-11-24T16:54:33,552 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/a77fea3a556068705b99707d91aa713b 2024-11-24T16:54:33,553 
DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/a77fea3a556068705b99707d91aa713b 2024-11-24T16:54:33,553 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for a77fea3a556068705b99707d91aa713b 2024-11-24T16:54:33,554 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for a77fea3a556068705b99707d91aa713b 2024-11-24T16:54:33,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/.tmp/info/999b85b39468429db376636f5eec9faf is 193, key is TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a./info:regioninfo/1732467273531/Put/seqid=0 2024-11-24T16:54:33,556 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for a77fea3a556068705b99707d91aa713b 2024-11-24T16:54:33,557 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened a77fea3a556068705b99707d91aa713b; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=763383, jitterRate=-0.029309362173080444}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T16:54:33,557 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a77fea3a556068705b99707d91aa713b 2024-11-24T16:54:33,557 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for a77fea3a556068705b99707d91aa713b: Running coprocessor pre-open hook at 1732467273531Writing region info on filesystem at 1732467273531Initializing all the Stores at 1732467273533 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467273533Cleaning up temporary data from old regions at 1732467273554 (+21 ms)Running coprocessor post-open hooks at 1732467273557 (+3 ms)Region opened successfully at 1732467273557 2024-11-24T16:54:33,558 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b., pid=13, masterSystemTime=1732467273490 2024-11-24T16:54:33,558 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store a77fea3a556068705b99707d91aa713b:info, priority=-2147483648, current under compaction store size is 2 2024-11-24T16:54:33,558 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:54:33,558 DEBUG [RS:0;e335232f7491:42875-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-24T16:54:33,559 INFO [RS:0;e335232f7491:42875-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b. 2024-11-24T16:54:33,559 DEBUG [RS:0;e335232f7491:42875-longCompactions-0 {}] regionserver.HStore(1541): a77fea3a556068705b99707d91aa713b/info is initiating minor compaction (all files) 2024-11-24T16:54:33,559 INFO [RS:0;e335232f7491:42875-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a77fea3a556068705b99707d91aa713b/info in TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b. 2024-11-24T16:54:33,559 INFO [RS:0;e335232f7491:42875-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/a77fea3a556068705b99707d91aa713b/info/7e5e28a7334846a9a5b63f127ba2b343.e3ca43f3124bd9e44ae59b22d0fde460->hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/7e5e28a7334846a9a5b63f127ba2b343-bottom] into tmpdir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/a77fea3a556068705b99707d91aa713b/.tmp, totalSize=73.6 K 2024-11-24T16:54:33,560 DEBUG [RS:0;e335232f7491:42875-longCompactions-0 {}] compactions.Compactor(225): Compacting 7e5e28a7334846a9a5b63f127ba2b343.e3ca43f3124bd9e44ae59b22d0fde460, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732467258479 2024-11-24T16:54:33,561 DEBUG [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b. 2024-11-24T16:54:33,561 INFO [RS_OPEN_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b. 2024-11-24T16:54:33,562 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=a77fea3a556068705b99707d91aa713b, regionState=OPEN, openSeqNum=127, regionLocation=e335232f7491,42875,1732467247444 2024-11-24T16:54:33,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure a77fea3a556068705b99707d91aa713b, server=e335232f7491,42875,1732467247444 because future has completed 2024-11-24T16:54:33,565 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b027cc4d524f38ce823f856963ba6e3a#info#compaction#66 average throughput is 11.97 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T16:54:33,565 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/f15a29a26ebe4e409ba4537c2c63ebae is 1080, key is row0062/info:/1732467270725/Put/seqid=0 2024-11-24T16:54:33,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741851_1027 (size=9882) 2024-11-24T16:54:33,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741851_1027 (size=9882) 2024-11-24T16:54:33,567 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/.tmp/info/999b85b39468429db376636f5eec9faf 2024-11-24T16:54:33,570 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-24T16:54:33,571 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure a77fea3a556068705b99707d91aa713b, server=e335232f7491,42875,1732467247444 in 229 msec 2024-11-24T16:54:33,574 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-24T16:54:33,574 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a77fea3a556068705b99707d91aa713b, ASSIGN in 389 msec 2024-11-24T16:54:33,576 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e3ca43f3124bd9e44ae59b22d0fde460, daughterA=a77fea3a556068705b99707d91aa713b, daughterB=b027cc4d524f38ce823f856963ba6e3a in 754 msec 2024-11-24T16:54:33,576 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e3ca43f3124bd9e44ae59b22d0fde460, daughterA=1ca357fae8f9bdce482e9481e0a54030, daughterB=aee506da2524d1dbb5fefc576cf27278 2024-11-24T16:54:33,576 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e3ca43f3124bd9e44ae59b22d0fde460, daughterA=1ca357fae8f9bdce482e9481e0a54030, daughterB=aee506da2524d1dbb5fefc576cf27278 2024-11-24T16:54:33,576 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e3ca43f3124bd9e44ae59b22d0fde460, daughterA=1ca357fae8f9bdce482e9481e0a54030, daughterB=aee506da2524d1dbb5fefc576cf27278 2024-11-24T16:54:33,577 INFO [PEWorker-1 {}] assignment.SplitTableRegionProcedure(534): Split of {ENCODED => e3ca43f3124bd9e44ae59b22d0fde460, NAME => 'TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460.', STARTKEY => '', 
ENDKEY => ''} skipped; state is already SPLIT 2024-11-24T16:54:33,579 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e3ca43f3124bd9e44ae59b22d0fde460, daughterA=1ca357fae8f9bdce482e9481e0a54030, daughterB=aee506da2524d1dbb5fefc576cf27278 in 748 msec 2024-11-24T16:54:33,582 INFO [RS:0;e335232f7491:42875-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a77fea3a556068705b99707d91aa713b#info#compaction#67 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T16:54:33,583 DEBUG [RS:0;e335232f7491:42875-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/a77fea3a556068705b99707d91aa713b/.tmp/info/6b59a34913284f9d8b14c4c39ea6b6c7 is 1080, key is row0001/info:/1732467258479/Put/seqid=0 2024-11-24T16:54:33,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741852_1028 (size=43081) 2024-11-24T16:54:33,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741852_1028 (size=43081) 2024-11-24T16:54:33,594 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/f15a29a26ebe4e409ba4537c2c63ebae as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/f15a29a26ebe4e409ba4537c2c63ebae 2024-11-24T16:54:33,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/.tmp/ns/7ae789107c374a7883b3c48f07040fec is 43, key is default/ns:d/1732467248288/Put/seqid=0 2024-11-24T16:54:33,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741853_1029 (size=70862) 2024-11-24T16:54:33,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741853_1029 (size=70862) 2024-11-24T16:54:33,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741854_1030 (size=5153) 2024-11-24T16:54:33,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741854_1030 (size=5153) 2024-11-24T16:54:33,601 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/.tmp/ns/7ae789107c374a7883b3c48f07040fec 2024-11-24T16:54:33,601 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in b027cc4d524f38ce823f856963ba6e3a/info of b027cc4d524f38ce823f856963ba6e3a into 
f15a29a26ebe4e409ba4537c2c63ebae(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T16:54:33,601 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b027cc4d524f38ce823f856963ba6e3a: 2024-11-24T16:54:33,601 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., storeName=b027cc4d524f38ce823f856963ba6e3a/info, priority=12, startTime=1732467273528; duration=0sec 2024-11-24T16:54:33,601 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:54:33,601 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b027cc4d524f38ce823f856963ba6e3a:info 2024-11-24T16:54:33,604 DEBUG [RS:0;e335232f7491:42875-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/a77fea3a556068705b99707d91aa713b/.tmp/info/6b59a34913284f9d8b14c4c39ea6b6c7 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/a77fea3a556068705b99707d91aa713b/info/6b59a34913284f9d8b14c4c39ea6b6c7 2024-11-24T16:54:33,610 INFO [RS:0;e335232f7491:42875-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in a77fea3a556068705b99707d91aa713b/info of a77fea3a556068705b99707d91aa713b into 6b59a34913284f9d8b14c4c39ea6b6c7(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-24T16:54:33,610 DEBUG [RS:0;e335232f7491:42875-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a77fea3a556068705b99707d91aa713b: 2024-11-24T16:54:33,610 INFO [RS:0;e335232f7491:42875-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b., storeName=a77fea3a556068705b99707d91aa713b/info, priority=15, startTime=1732467273558; duration=0sec 2024-11-24T16:54:33,610 DEBUG [RS:0;e335232f7491:42875-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:54:33,610 DEBUG [RS:0;e335232f7491:42875-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a77fea3a556068705b99707d91aa713b:info 2024-11-24T16:54:33,621 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/.tmp/table/f869c705266f40eabb09fe29b598c734 is 65, key is TestLogRolling-testLogRolling/table:state/1732467248738/Put/seqid=0 2024-11-24T16:54:33,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741855_1031 (size=5340) 2024-11-24T16:54:33,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741855_1031 (size=5340) 2024-11-24T16:54:33,627 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/.tmp/table/f869c705266f40eabb09fe29b598c734 2024-11-24T16:54:33,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/.tmp/info/999b85b39468429db376636f5eec9faf as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/info/999b85b39468429db376636f5eec9faf 2024-11-24T16:54:33,637 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/info/999b85b39468429db376636f5eec9faf, entries=30, sequenceid=17, filesize=9.7 K 2024-11-24T16:54:33,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/.tmp/ns/7ae789107c374a7883b3c48f07040fec as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/ns/7ae789107c374a7883b3c48f07040fec 2024-11-24T16:54:33,644 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/ns/7ae789107c374a7883b3c48f07040fec, entries=2, sequenceid=17, filesize=5.0 K 2024-11-24T16:54:33,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/.tmp/table/f869c705266f40eabb09fe29b598c734 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/table/f869c705266f40eabb09fe29b598c734 2024-11-24T16:54:33,649 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/table/f869c705266f40eabb09fe29b598c734, entries=2, sequenceid=17, filesize=5.2 K 2024-11-24T16:54:33,651 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 117ms, sequenceid=17, compaction requested=false 2024-11-24T16:54:33,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-24T16:54:33,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:54:33,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:34,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:32774 deadline: 1732467284799, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. is not online on e335232f7491,42875,1732467247444 2024-11-24T16:54:34,801 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460., hostname=e335232f7491,42875,1732467247444, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460., hostname=e335232f7491,42875,1732467247444, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. 
is not online on e335232f7491,42875,1732467247444 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T16:54:34,801 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460., hostname=e335232f7491,42875,1732467247444, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460. is not online on e335232f7491,42875,1732467247444 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T16:54:34,801 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732467248374.e3ca43f3124bd9e44ae59b22d0fde460., hostname=e335232f7491,42875,1732467247444, seqNum=2 from cache 2024-11-24T16:54:34,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:34,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:54:35,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:35,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:36,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:54:36,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:37,367 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T16:54:37,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:37,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:38,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,064 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,064 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,064 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,065 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,065 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,065 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,069 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,069 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,071 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,578 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T16:54:38,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,603 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,603 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,603 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,604 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,608 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,610 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T16:54:38,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:54:38,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:39,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:39,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:54:40,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:40,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:41,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:54:41,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:42,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:42,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:54:43,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:43,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:44,909 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., hostname=e335232f7491,42875,1732467247444, seqNum=127] 2024-11-24T16:54:44,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:44,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:54:44,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:44,922 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b027cc4d524f38ce823f856963ba6e3a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T16:54:44,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/a5b9dc39e8624d11a672df413534f74f is 1080, key is row0097/info:/1732467284910/Put/seqid=0 2024-11-24T16:54:44,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741856_1032 (size=12516) 2024-11-24T16:54:44,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741856_1032 (size=12516) 2024-11-24T16:54:44,935 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/a5b9dc39e8624d11a672df413534f74f 2024-11-24T16:54:44,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/a5b9dc39e8624d11a672df413534f74f as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/a5b9dc39e8624d11a672df413534f74f 2024-11-24T16:54:44,947 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/a5b9dc39e8624d11a672df413534f74f, entries=7, sequenceid=137, filesize=12.2 K 2024-11-24T16:54:44,948 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for b027cc4d524f38ce823f856963ba6e3a in 26ms, sequenceid=137, compaction requested=false 2024-11-24T16:54:44,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b027cc4d524f38ce823f856963ba6e3a: 2024-11-24T16:54:44,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:44,950 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b027cc4d524f38ce823f856963ba6e3a 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-24T16:54:44,955 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/e2df136d60b440fe9da377f44550f2e3 is 1080, key is row0104/info:/1732467284923/Put/seqid=0 2024-11-24T16:54:44,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to 
blk_1073741857_1033 (size=21156) 2024-11-24T16:54:44,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741857_1033 (size=21156) 2024-11-24T16:54:44,961 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/e2df136d60b440fe9da377f44550f2e3 2024-11-24T16:54:44,967 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/e2df136d60b440fe9da377f44550f2e3 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/e2df136d60b440fe9da377f44550f2e3 2024-11-24T16:54:44,972 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/e2df136d60b440fe9da377f44550f2e3, entries=15, sequenceid=155, filesize=20.7 K 2024-11-24T16:54:44,973 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for b027cc4d524f38ce823f856963ba6e3a in 23ms, sequenceid=155, compaction requested=true 2024-11-24T16:54:44,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b027cc4d524f38ce823f856963ba6e3a: 2024-11-24T16:54:44,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b027cc4d524f38ce823f856963ba6e3a:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T16:54:44,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:54:44,973 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T16:54:44,974 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 76753 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T16:54:44,975 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1541): b027cc4d524f38ce823f856963ba6e3a/info is initiating minor compaction (all files) 2024-11-24T16:54:44,975 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b027cc4d524f38ce823f856963ba6e3a/info in TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a. 
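The "Exploring compaction algorithm ... in ratio" entries from ExploringCompactionPolicy above refer to a ratio test over the sizes of the candidate store files: a set is "in ratio" when no single file is larger than the configured ratio times the combined size of the other files in the set. The following is a minimal, hypothetical Java sketch of that test only; the class and method names, the ratio value, and the example sizes are assumptions made here for illustration, and the real policy layers further rules (minimum/maximum file counts, off-peak ratios, forced selection) on top of this check.

    import java.util.List;

    public class RatioCheckSketch {

        // A candidate set is "in ratio" if every file is no larger than
        // ratio * (sum of the sizes of the other files in the set), so a
        // single very large file tends to keep a set out of minor compaction.
        static boolean filesInRatio(List<Long> sizes, double ratio) {
            long total = sizes.stream().mapToLong(Long::longValue).sum();
            for (long size : sizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Made-up example sizes in bytes (not taken from the log above).
            List<Long> candidate = List.of(12_000L, 15_000L, 20_000L);
            double ratio = 1.2; // assumed ratio value for this illustration
            System.out.println("in ratio: " + filesInRatio(candidate, ratio));
        }
    }

Run as-is, this prints "in ratio: true" for the sample sizes; replacing the list with sizes where one file dominates the others makes the check fail, which is the behaviour the "in ratio" wording in the selection messages is describing.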
2024-11-24T16:54:44,975 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/f15a29a26ebe4e409ba4537c2c63ebae, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/a5b9dc39e8624d11a672df413534f74f, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/e2df136d60b440fe9da377f44550f2e3] into tmpdir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp, totalSize=75.0 K 2024-11-24T16:54:44,975 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting f15a29a26ebe4e409ba4537c2c63ebae, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732467270725 2024-11-24T16:54:44,976 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting a5b9dc39e8624d11a672df413534f74f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732467284910 2024-11-24T16:54:44,976 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting e2df136d60b440fe9da377f44550f2e3, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732467284923 2024-11-24T16:54:44,989 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b027cc4d524f38ce823f856963ba6e3a#info#compaction#72 average throughput is 29.25 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T16:54:44,989 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/8898740904934da5a0a8d6ad5e748b47 is 1080, key is row0062/info:/1732467270725/Put/seqid=0 2024-11-24T16:54:45,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741858_1034 (size=66967) 2024-11-24T16:54:45,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741858_1034 (size=66967) 2024-11-24T16:54:45,026 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/8898740904934da5a0a8d6ad5e748b47 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/8898740904934da5a0a8d6ad5e748b47 2024-11-24T16:54:45,032 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b027cc4d524f38ce823f856963ba6e3a/info of b027cc4d524f38ce823f856963ba6e3a into 8898740904934da5a0a8d6ad5e748b47(size=65.4 K), total size for store is 65.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T16:54:45,032 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b027cc4d524f38ce823f856963ba6e3a: 2024-11-24T16:54:45,032 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., storeName=b027cc4d524f38ce823f856963ba6e3a/info, priority=13, startTime=1732467284973; duration=0sec 2024-11-24T16:54:45,033 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:54:45,033 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b027cc4d524f38ce823f856963ba6e3a:info 2024-11-24T16:54:45,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:45,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:46,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:46,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:46,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:46,971 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b027cc4d524f38ce823f856963ba6e3a 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T16:54:46,977 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/71da3c3fddcb46e2a25ffc2658827f0e is 1080, key is row0119/info:/1732467284951/Put/seqid=0 2024-11-24T16:54:46,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741859_1035 (size=16828) 2024-11-24T16:54:46,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741859_1035 (size=16828) 2024-11-24T16:54:46,986 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/71da3c3fddcb46e2a25ffc2658827f0e 2024-11-24T16:54:46,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/71da3c3fddcb46e2a25ffc2658827f0e as 
hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/71da3c3fddcb46e2a25ffc2658827f0e 2024-11-24T16:54:46,999 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/71da3c3fddcb46e2a25ffc2658827f0e, entries=11, sequenceid=170, filesize=16.4 K 2024-11-24T16:54:47,000 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=13.66 KB/13988 for b027cc4d524f38ce823f856963ba6e3a in 29ms, sequenceid=170, compaction requested=false 2024-11-24T16:54:47,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b027cc4d524f38ce823f856963ba6e3a: 2024-11-24T16:54:47,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:47,002 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b027cc4d524f38ce823f856963ba6e3a 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-24T16:54:47,007 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/fb331ffc421742099bf3c3a02b172aca is 1080, key is row0130/info:/1732467286972/Put/seqid=0 2024-11-24T16:54:47,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741860_1036 (size=21156) 2024-11-24T16:54:47,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741860_1036 (size=21156) 2024-11-24T16:54:47,023 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=188 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/fb331ffc421742099bf3c3a02b172aca 2024-11-24T16:54:47,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/fb331ffc421742099bf3c3a02b172aca as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/fb331ffc421742099bf3c3a02b172aca 2024-11-24T16:54:47,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-24T16:54:47,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:32774 deadline: 1732467297035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444 2024-11-24T16:54:47,037 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/fb331ffc421742099bf3c3a02b172aca, entries=15, sequenceid=188, filesize=20.7 K 2024-11-24T16:54:47,037 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., hostname=e335232f7491,42875,1732467247444, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., hostname=e335232f7491,42875,1732467247444, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T16:54:47,037 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., 
hostname=e335232f7491,42875,1732467247444, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T16:54:47,037 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., hostname=e335232f7491,42875,1732467247444, seqNum=127 because the exception is null or not the one we care about 2024-11-24T16:54:47,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=14.71 KB/15064 for b027cc4d524f38ce823f856963ba6e3a in 36ms, sequenceid=188, compaction requested=true 2024-11-24T16:54:47,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b027cc4d524f38ce823f856963ba6e3a: 2024-11-24T16:54:47,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b027cc4d524f38ce823f856963ba6e3a:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T16:54:47,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:54:47,038 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T16:54:47,039 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 104951 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T16:54:47,040 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1541): b027cc4d524f38ce823f856963ba6e3a/info is initiating minor compaction (all files) 2024-11-24T16:54:47,040 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b027cc4d524f38ce823f856963ba6e3a/info in TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a. 
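For reference, the "3 files of size 104951" reported by ExploringCompactionPolicy above is simply the sum of the three eligible store files listed in the next records: 66967 + 16828 + 21156 = 104951 bytes, i.e. the "totalSize=102.5 K" in the compaction request. A minimal, illustrative Java check of that arithmetic follows; it is not HBase code, and the file names and byte sizes are taken from the block reports earlier in this log.

import java.util.LinkedHashMap;
import java.util.Map;

public class CompactionSelectionCheck {
    public static void main(String[] args) {
        // Eligible store files for b027cc4d524f38ce823f856963ba6e3a/info, sizes from the
        // addStoredBlock entries above (blk_..._1034, blk_..._1035, blk_..._1036).
        Map<String, Long> eligibleStoreFiles = new LinkedHashMap<>();
        eligibleStoreFiles.put("8898740904934da5a0a8d6ad5e748b47", 66967L); // 65.4 K
        eligibleStoreFiles.put("71da3c3fddcb46e2a25ffc2658827f0e", 16828L); // 16.4 K
        eligibleStoreFiles.put("fb331ffc421742099bf3c3a02b172aca", 21156L); // 20.7 K

        long totalBytes = eligibleStoreFiles.values().stream().mapToLong(Long::longValue).sum();
        // Should match "selected 3 files of size 104951" and "totalSize=102.5 K" in the log.
        System.out.printf("selected %d files of size %d (%.1f K)%n",
                eligibleStoreFiles.size(), totalBytes, totalBytes / 1024.0);
    }
}

The same sum explains why this is a minor compaction of "(all files)": all three store files in the store are eligible and selected together, producing the single 92.9 K file committed below.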
2024-11-24T16:54:47,040 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/8898740904934da5a0a8d6ad5e748b47, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/71da3c3fddcb46e2a25ffc2658827f0e, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/fb331ffc421742099bf3c3a02b172aca] into tmpdir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp, totalSize=102.5 K 2024-11-24T16:54:47,040 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8898740904934da5a0a8d6ad5e748b47, keycount=57, bloomtype=ROW, size=65.4 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732467270725 2024-11-24T16:54:47,043 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting 71da3c3fddcb46e2a25ffc2658827f0e, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732467284951 2024-11-24T16:54:47,043 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting fb331ffc421742099bf3c3a02b172aca, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1732467286972 2024-11-24T16:54:47,069 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b027cc4d524f38ce823f856963ba6e3a#info#compaction#75 average throughput is 28.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T16:54:47,069 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/c65edbccc0e04d859404491b6e885f63 is 1080, key is row0062/info:/1732467270725/Put/seqid=0 2024-11-24T16:54:47,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741861_1037 (size=95174) 2024-11-24T16:54:47,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741861_1037 (size=95174) 2024-11-24T16:54:47,096 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/c65edbccc0e04d859404491b6e885f63 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/c65edbccc0e04d859404491b6e885f63 2024-11-24T16:54:47,106 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b027cc4d524f38ce823f856963ba6e3a/info of b027cc4d524f38ce823f856963ba6e3a into c65edbccc0e04d859404491b6e885f63(size=92.9 K), total size for store is 92.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T16:54:47,107 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b027cc4d524f38ce823f856963ba6e3a: 2024-11-24T16:54:47,107 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., storeName=b027cc4d524f38ce823f856963ba6e3a/info, priority=13, startTime=1732467287038; duration=0sec 2024-11-24T16:54:47,107 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:54:47,107 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b027cc4d524f38ce823f856963ba6e3a:info 2024-11-24T16:54:47,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:47,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:48,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:48,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:48,924 INFO [master/e335232f7491:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T16:54:48,924 INFO [master/e335232f7491:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T16:54:49,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:49,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:50,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:50,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:51,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:51,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:52,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:52,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:53,253 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-11-24T16:54:53,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:53,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:54,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:54,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:55,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:55,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:56,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:56,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:57,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:57,063 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b027cc4d524f38ce823f856963ba6e3a 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-24T16:54:57,068 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/0c08246219aa4a5ca301081fdaf6aa1b is 1080, key is row0145/info:/1732467287003/Put/seqid=0 2024-11-24T16:54:57,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741862_1038 (size=21156) 2024-11-24T16:54:57,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741862_1038 (size=21156) 2024-11-24T16:54:57,073 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/0c08246219aa4a5ca301081fdaf6aa1b 2024-11-24T16:54:57,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/0c08246219aa4a5ca301081fdaf6aa1b as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/0c08246219aa4a5ca301081fdaf6aa1b 2024-11-24T16:54:57,083 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/0c08246219aa4a5ca301081fdaf6aa1b, entries=15, sequenceid=207, filesize=20.7 K 2024-11-24T16:54:57,084 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=1.05 KB/1076 for b027cc4d524f38ce823f856963ba6e3a in 21ms, sequenceid=207, compaction requested=false 2024-11-24T16:54:57,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b027cc4d524f38ce823f856963ba6e3a: 2024-11-24T16:54:57,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation 
for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:57,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:58,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:54:58,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:59,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:54:59,076 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b027cc4d524f38ce823f856963ba6e3a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T16:54:59,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/10956faa64ec49968ff1a8764e1708a7 is 1080, key is row0160/info:/1732467297064/Put/seqid=0 2024-11-24T16:54:59,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-24T16:54:59,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:32774 deadline: 1732467309109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444 2024-11-24T16:54:59,110 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., hostname=e335232f7491,42875,1732467247444, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., hostname=e335232f7491,42875,1732467247444, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T16:54:59,110 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., hostname=e335232f7491,42875,1732467247444, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at 
org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T16:54:59,110 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., hostname=e335232f7491,42875,1732467247444, seqNum=127 because the exception is null or not the one we care about 2024-11-24T16:54:59,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741863_1039 (size=12516) 2024-11-24T16:54:59,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741863_1039 (size=12516) 2024-11-24T16:54:59,115 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/10956faa64ec49968ff1a8764e1708a7 2024-11-24T16:54:59,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/10956faa64ec49968ff1a8764e1708a7 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/10956faa64ec49968ff1a8764e1708a7 2024-11-24T16:54:59,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/10956faa64ec49968ff1a8764e1708a7, entries=7, sequenceid=217, filesize=12.2 K 2024-11-24T16:54:59,126 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for b027cc4d524f38ce823f856963ba6e3a in 51ms, sequenceid=217, compaction requested=true 2024-11-24T16:54:59,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b027cc4d524f38ce823f856963ba6e3a: 2024-11-24T16:54:59,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b027cc4d524f38ce823f856963ba6e3a:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T16:54:59,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:54:59,127 DEBUG 
[RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T16:54:59,128 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128846 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T16:54:59,128 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1541): b027cc4d524f38ce823f856963ba6e3a/info is initiating minor compaction (all files) 2024-11-24T16:54:59,128 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b027cc4d524f38ce823f856963ba6e3a/info in TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a. 2024-11-24T16:54:59,128 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/c65edbccc0e04d859404491b6e885f63, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/0c08246219aa4a5ca301081fdaf6aa1b, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/10956faa64ec49968ff1a8764e1708a7] into tmpdir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp, totalSize=125.8 K 2024-11-24T16:54:59,128 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting c65edbccc0e04d859404491b6e885f63, keycount=83, bloomtype=ROW, size=92.9 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1732467270725 2024-11-24T16:54:59,129 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0c08246219aa4a5ca301081fdaf6aa1b, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732467287003 2024-11-24T16:54:59,129 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting 10956faa64ec49968ff1a8764e1708a7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732467297064 2024-11-24T16:54:59,141 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b027cc4d524f38ce823f856963ba6e3a#info#compaction#78 average throughput is 53.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T16:54:59,142 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/8450b6cce8cd4c35b2ed376cfd6d4195 is 1080, key is row0062/info:/1732467270725/Put/seqid=0 2024-11-24T16:54:59,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741864_1040 (size=118996) 2024-11-24T16:54:59,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741864_1040 (size=118996) 2024-11-24T16:54:59,153 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/8450b6cce8cd4c35b2ed376cfd6d4195 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/8450b6cce8cd4c35b2ed376cfd6d4195 2024-11-24T16:54:59,159 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b027cc4d524f38ce823f856963ba6e3a/info of b027cc4d524f38ce823f856963ba6e3a into 8450b6cce8cd4c35b2ed376cfd6d4195(size=116.2 K), total size for store is 116.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T16:54:59,159 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b027cc4d524f38ce823f856963ba6e3a: 2024-11-24T16:54:59,159 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., storeName=b027cc4d524f38ce823f856963ba6e3a/info, priority=13, startTime=1732467299127; duration=0sec 2024-11-24T16:54:59,159 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:54:59,159 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b027cc4d524f38ce823f856963ba6e3a:info 2024-11-24T16:54:59,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:54:59,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:00,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:00,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:01,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:01,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:02,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:02,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:03,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:03,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:04,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:04,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:05,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:05,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:06,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:06,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:07,367 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T16:55:07,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:07,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:55:08,303 DEBUG [master/e335232f7491:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-24T16:55:08,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:08,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:09,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:55:09,123 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b027cc4d524f38ce823f856963ba6e3a 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-24T16:55:09,127 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/b44937d5817d4c9fbe0e8d982db8b90a is 1080, key is row0167/info:/1732467299077/Put/seqid=0 2024-11-24T16:55:09,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741865_1041 (size=29784) 2024-11-24T16:55:09,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741865_1041 (size=29784) 2024-11-24T16:55:09,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/b44937d5817d4c9fbe0e8d982db8b90a 2024-11-24T16:55:09,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/b44937d5817d4c9fbe0e8d982db8b90a as 
hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/b44937d5817d4c9fbe0e8d982db8b90a 2024-11-24T16:55:09,144 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/b44937d5817d4c9fbe0e8d982db8b90a, entries=23, sequenceid=244, filesize=29.1 K 2024-11-24T16:55:09,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=3.15 KB/3228 for b027cc4d524f38ce823f856963ba6e3a in 22ms, sequenceid=244, compaction requested=false 2024-11-24T16:55:09,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b027cc4d524f38ce823f856963ba6e3a: 2024-11-24T16:55:09,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:55:09,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:10,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:10,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:55:11,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:55:11,136 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b027cc4d524f38ce823f856963ba6e3a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T16:55:11,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/bbee5ef38103473cb7d295b590e9afc0 is 1080, key is row0190/info:/1732467309124/Put/seqid=0 2024-11-24T16:55:11,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741866_1042 (size=12516) 2024-11-24T16:55:11,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741866_1042 (size=12516) 2024-11-24T16:55:11,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/bbee5ef38103473cb7d295b590e9afc0 2024-11-24T16:55:11,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-24T16:55:11,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:32774 deadline: 1732467321172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444 2024-11-24T16:55:11,173 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., hostname=e335232f7491,42875,1732467247444, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., hostname=e335232f7491,42875,1732467247444, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T16:55:11,173 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., hostname=e335232f7491,42875,1732467247444, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T16:55:11,173 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., hostname=e335232f7491,42875,1732467247444, seqNum=127 because the exception is null or not the one we care about 2024-11-24T16:55:11,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/bbee5ef38103473cb7d295b590e9afc0 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/bbee5ef38103473cb7d295b590e9afc0 2024-11-24T16:55:11,182 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/bbee5ef38103473cb7d295b590e9afc0, entries=7, sequenceid=254, filesize=12.2 K 2024-11-24T16:55:11,183 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for b027cc4d524f38ce823f856963ba6e3a in 47ms, sequenceid=254, compaction requested=true 2024-11-24T16:55:11,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b027cc4d524f38ce823f856963ba6e3a: 2024-11-24T16:55:11,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b027cc4d524f38ce823f856963ba6e3a:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T16:55:11,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:55:11,184 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T16:55:11,185 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 161296 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T16:55:11,185 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1541): b027cc4d524f38ce823f856963ba6e3a/info is initiating minor compaction (all files) 2024-11-24T16:55:11,185 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b027cc4d524f38ce823f856963ba6e3a/info in TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a. 
2024-11-24T16:55:11,185 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/8450b6cce8cd4c35b2ed376cfd6d4195, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/b44937d5817d4c9fbe0e8d982db8b90a, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/bbee5ef38103473cb7d295b590e9afc0] into tmpdir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp, totalSize=157.5 K 2024-11-24T16:55:11,185 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8450b6cce8cd4c35b2ed376cfd6d4195, keycount=105, bloomtype=ROW, size=116.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732467270725 2024-11-24T16:55:11,186 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting b44937d5817d4c9fbe0e8d982db8b90a, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1732467299077 2024-11-24T16:55:11,186 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting bbee5ef38103473cb7d295b590e9afc0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732467309124 2024-11-24T16:55:11,197 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b027cc4d524f38ce823f856963ba6e3a#info#compaction#81 average throughput is 69.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T16:55:11,198 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/df1b6ad98def42189f6139c3a42e4ef5 is 1080, key is row0062/info:/1732467270725/Put/seqid=0 2024-11-24T16:55:11,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741867_1043 (size=151643) 2024-11-24T16:55:11,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741867_1043 (size=151643) 2024-11-24T16:55:11,208 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/df1b6ad98def42189f6139c3a42e4ef5 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/df1b6ad98def42189f6139c3a42e4ef5 2024-11-24T16:55:11,214 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b027cc4d524f38ce823f856963ba6e3a/info of b027cc4d524f38ce823f856963ba6e3a into df1b6ad98def42189f6139c3a42e4ef5(size=148.1 K), total size for store is 148.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T16:55:11,215 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b027cc4d524f38ce823f856963ba6e3a: 2024-11-24T16:55:11,215 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., storeName=b027cc4d524f38ce823f856963ba6e3a/info, priority=13, startTime=1732467311183; duration=0sec 2024-11-24T16:55:11,215 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:55:11,215 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b027cc4d524f38ce823f856963ba6e3a:info 2024-11-24T16:55:11,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:11,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-24T16:55:14,719 DEBUG [master/e335232f7491:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=67, reuseRatio=88.16%
2024-11-24T16:55:14,719 DEBUG [master/e335232f7491:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
2024-11-24T16:55:18,495 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b027cc4d524f38ce823f856963ba6e3a, had cached 0 bytes from a total of 151643
2024-11-24T16:55:18,531 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a77fea3a556068705b99707d91aa713b, had cached 0 bytes from a total of 70862
2024-11-24T16:55:21,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on b027cc4d524f38ce823f856963ba6e3a
2024-11-24T16:55:21,214 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b027cc4d524f38ce823f856963ba6e3a 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-11-24T16:55:21,219 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/b418f5935acc44c795a64e1dddd94e7e is 1080, key is row0197/info:/1732467311137/Put/seqid=0
2024-11-24T16:55:21,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741868_1044 (size=29807)
2024-11-24T16:55:21,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741868_1044 (size=29807)
2024-11-24T16:55:21,224 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=281 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/b418f5935acc44c795a64e1dddd94e7e
2024-11-24T16:55:21,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/b418f5935acc44c795a64e1dddd94e7e as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/b418f5935acc44c795a64e1dddd94e7e
2024-11-24T16:55:21,235 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/b418f5935acc44c795a64e1dddd94e7e, entries=23, sequenceid=281, filesize=29.1 K
2024-11-24T16:55:21,236 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=5.25 KB/5380 for b027cc4d524f38ce823f856963ba6e3a in 23ms, sequenceid=281, compaction requested=false
2024-11-24T16:55:21,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b027cc4d524f38ce823f856963ba6e3a:
2024-11-24T16:55:21,253 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T16:55:21,782 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
11 more 2024-11-24T16:55:23,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:55:23,226 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b027cc4d524f38ce823f856963ba6e3a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T16:55:23,231 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/943f1b7d12b543fbb35fdb193a625dd6 is 1080, key is row0220/info:/1732467321215/Put/seqid=0 2024-11-24T16:55:23,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741869_1045 (size=12523) 2024-11-24T16:55:23,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741869_1045 (size=12523) 2024-11-24T16:55:23,236 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/943f1b7d12b543fbb35fdb193a625dd6 2024-11-24T16:55:23,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/943f1b7d12b543fbb35fdb193a625dd6 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/943f1b7d12b543fbb35fdb193a625dd6 2024-11-24T16:55:23,246 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/943f1b7d12b543fbb35fdb193a625dd6, entries=7, sequenceid=291, filesize=12.2 K 2024-11-24T16:55:23,247 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for b027cc4d524f38ce823f856963ba6e3a in 21ms, sequenceid=291, compaction requested=true 2024-11-24T16:55:23,247 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b027cc4d524f38ce823f856963ba6e3a: 2024-11-24T16:55:23,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b027cc4d524f38ce823f856963ba6e3a:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T16:55:23,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:55:23,247 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T16:55:23,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:55:23,249 DEBUG 
[RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 193973 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T16:55:23,249 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1541): b027cc4d524f38ce823f856963ba6e3a/info is initiating minor compaction (all files) 2024-11-24T16:55:23,249 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b027cc4d524f38ce823f856963ba6e3a/info in TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a. 2024-11-24T16:55:23,249 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b027cc4d524f38ce823f856963ba6e3a 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-24T16:55:23,249 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/df1b6ad98def42189f6139c3a42e4ef5, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/b418f5935acc44c795a64e1dddd94e7e, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/943f1b7d12b543fbb35fdb193a625dd6] into tmpdir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp, totalSize=189.4 K 2024-11-24T16:55:23,249 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting df1b6ad98def42189f6139c3a42e4ef5, keycount=135, bloomtype=ROW, size=148.1 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732467270725 2024-11-24T16:55:23,250 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting b418f5935acc44c795a64e1dddd94e7e, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1732467311137 2024-11-24T16:55:23,250 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting 943f1b7d12b543fbb35fdb193a625dd6, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732467321215 2024-11-24T16:55:23,253 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/e805b51caa3d4cb68099a78c20c7bd1f is 1080, key is row0227/info:/1732467323227/Put/seqid=0 2024-11-24T16:55:23,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741870_1046 (size=20092) 2024-11-24T16:55:23,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741870_1046 (size=20092) 2024-11-24T16:55:23,265 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=308 (bloomFilter=true), 
to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/e805b51caa3d4cb68099a78c20c7bd1f 2024-11-24T16:55:23,269 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b027cc4d524f38ce823f856963ba6e3a#info#compaction#85 average throughput is 56.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T16:55:23,269 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/e0c80c0ba50f4d01ae00b4c6012aedac is 1080, key is row0062/info:/1732467270725/Put/seqid=0 2024-11-24T16:55:23,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/e805b51caa3d4cb68099a78c20c7bd1f as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/e805b51caa3d4cb68099a78c20c7bd1f 2024-11-24T16:55:23,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741871_1047 (size=184123) 2024-11-24T16:55:23,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741871_1047 (size=184123) 2024-11-24T16:55:23,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-24T16:55:23,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:32774 deadline: 1732467333274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444 2024-11-24T16:55:23,275 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., hostname=e335232f7491,42875,1732467247444, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., hostname=e335232f7491,42875,1732467247444, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T16:55:23,276 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., hostname=e335232f7491,42875,1732467247444, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b027cc4d524f38ce823f856963ba6e3a, server=e335232f7491,42875,1732467247444 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T16:55:23,276 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., hostname=e335232f7491,42875,1732467247444, seqNum=127 because the exception is null or not the one we care about 2024-11-24T16:55:23,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/e805b51caa3d4cb68099a78c20c7bd1f, entries=14, sequenceid=308, filesize=19.6 K 2024-11-24T16:55:23,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=15.76 KB/16140 for b027cc4d524f38ce823f856963ba6e3a in 28ms, sequenceid=308, compaction requested=false 2024-11-24T16:55:23,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b027cc4d524f38ce823f856963ba6e3a: 2024-11-24T16:55:23,279 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/e0c80c0ba50f4d01ae00b4c6012aedac as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/e0c80c0ba50f4d01ae00b4c6012aedac 2024-11-24T16:55:23,284 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b027cc4d524f38ce823f856963ba6e3a/info of b027cc4d524f38ce823f856963ba6e3a into e0c80c0ba50f4d01ae00b4c6012aedac(size=179.8 K), total size for store is 199.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
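
The RegionTooBusyException warnings above are raised from HRegion.checkResources while this region's memstore is over its blocking limit (reported as 32.0 K in this test run), so incoming mutations are rejected until the in-flight flush brings the size back down. Below is a minimal, self-contained sketch of that kind of back-pressure check; the class and member names are illustrative assumptions for this log and do not reproduce HBase's actual implementation.

    // Illustrative sketch only -- not HBase's real HRegion.checkResources().
    // It mimics the behaviour visible in the log: once a region's memstore
    // grows past its blocking limit, further writes fail fast with a
    // RegionTooBusyException until a flush shrinks the memstore again.
    final class MemStoreGuardSketch {

        // Hypothetical stand-in for org.apache.hadoop.hbase.RegionTooBusyException.
        static final class RegionTooBusyException extends RuntimeException {
            RegionTooBusyException(String msg) { super(msg); }
        }

        private final String regionName;
        private final long blockingMemStoreSize;   // e.g. 32 KB in this test
        private long currentMemStoreSize;

        MemStoreGuardSketch(String regionName, long blockingMemStoreSize) {
            this.regionName = regionName;
            this.blockingMemStoreSize = blockingMemStoreSize;
        }

        // Checked before applying a mutation, analogous to checkResources()
        // in the stack trace above.
        void checkResources() {
            if (currentMemStoreSize > blockingMemStoreSize) {
                throw new RegionTooBusyException("Over memstore limit="
                    + blockingMemStoreSize + ", regionName=" + regionName);
            }
        }

        void put(long mutationSize) {
            checkResources();
            currentMemStoreSize += mutationSize;   // a flush would reset this
        }
    }
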
2024-11-24T16:55:23,284 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b027cc4d524f38ce823f856963ba6e3a: 2024-11-24T16:55:23,284 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., storeName=b027cc4d524f38ce823f856963ba6e3a/info, priority=13, startTime=1732467323247; duration=0sec 2024-11-24T16:55:23,284 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:55:23,284 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b027cc4d524f38ce823f856963ba6e3a:info 2024-11-24T16:55:23,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:55:23,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:24,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:24,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:55:25,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:25,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:26,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:55:26,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:27,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:27,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:55:28,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:28,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:29,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:55:29,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:30,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:30,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:55:31,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:31,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:32,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T16:55:32,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-24T16:55:33,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42875 {}] regionserver.HRegion(8855): Flush requested on b027cc4d524f38ce823f856963ba6e3a
2024-11-24T16:55:33,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b027cc4d524f38ce823f856963ba6e3a 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB
2024-11-24T16:55:33,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/b0f70dfc59d04b2ebbcf4316b9813760 is 1080, key is row0241/info:/1732467323250/Put/seqid=0
2024-11-24T16:55:33,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741872_1048 (size=22254)
2024-11-24T16:55:33,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741872_1048 (size=22254)
2024-11-24T16:55:33,373 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/b0f70dfc59d04b2ebbcf4316b9813760
2024-11-24T16:55:33,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/b0f70dfc59d04b2ebbcf4316b9813760 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/b0f70dfc59d04b2ebbcf4316b9813760
2024-11-24T16:55:33,382 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/b0f70dfc59d04b2ebbcf4316b9813760, entries=16, sequenceid=328, filesize=21.7 K
2024-11-24T16:55:33,383 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=0 B/0 for b027cc4d524f38ce823f856963ba6e3a in 30ms, sequenceid=328, compaction requested=true
2024-11-24T16:55:33,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b027cc4d524f38ce823f856963ba6e3a:
2024-11-24T16:55:33,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b027cc4d524f38ce823f856963ba6e3a:info, priority=-2147483648, current under compaction store size is 1
2024-11-24T16:55:33,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-24T16:55:33,384 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-24T16:55:33,384 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 226469 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-24T16:55:33,384 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1541): b027cc4d524f38ce823f856963ba6e3a/info is initiating minor compaction (all files)
2024-11-24T16:55:33,384 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b027cc4d524f38ce823f856963ba6e3a/info in TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.
2024-11-24T16:55:33,385 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/e0c80c0ba50f4d01ae00b4c6012aedac, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/e805b51caa3d4cb68099a78c20c7bd1f, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/b0f70dfc59d04b2ebbcf4316b9813760] into tmpdir=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp, totalSize=221.2 K
2024-11-24T16:55:33,385 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting e0c80c0ba50f4d01ae00b4c6012aedac, keycount=165, bloomtype=ROW, size=179.8 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732467270725
2024-11-24T16:55:33,385 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting e805b51caa3d4cb68099a78c20c7bd1f, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1732467323227
2024-11-24T16:55:33,386 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] compactions.Compactor(225): Compacting b0f70dfc59d04b2ebbcf4316b9813760, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732467323250
2024-11-24T16:55:33,396 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b027cc4d524f38ce823f856963ba6e3a#info#compaction#87 average throughput is 66.70 MB/second, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T16:55:33,397 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/a5fcde1eba9143168bee5cdff9063b22 is 1080, key is row0062/info:/1732467270725/Put/seqid=0 2024-11-24T16:55:33,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741873_1049 (size=216672) 2024-11-24T16:55:33,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741873_1049 (size=216672) 2024-11-24T16:55:33,404 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/.tmp/info/a5fcde1eba9143168bee5cdff9063b22 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/a5fcde1eba9143168bee5cdff9063b22 2024-11-24T16:55:33,408 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b027cc4d524f38ce823f856963ba6e3a/info of b027cc4d524f38ce823f856963ba6e3a into a5fcde1eba9143168bee5cdff9063b22(size=211.6 K), total size for store is 211.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T16:55:33,408 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b027cc4d524f38ce823f856963ba6e3a: 2024-11-24T16:55:33,408 INFO [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a., storeName=b027cc4d524f38ce823f856963ba6e3a/info, priority=13, startTime=1732467333383; duration=0sec 2024-11-24T16:55:33,409 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T16:55:33,409 DEBUG [RS:0;e335232f7491:42875-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b027cc4d524f38ce823f856963ba6e3a:info 2024-11-24T16:55:33,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:33,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:34,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:34,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-24T16:55:35,354 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files
2024-11-24T16:55:35,354 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C42875%2C1732467247444.1732467335354
2024-11-24T16:55:35,369 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T16:55:35,370 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T16:55:35,370 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T16:55:35,370 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T16:55:35,370 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T16:55:35,370 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/WALs/e335232f7491,42875,1732467247444/e335232f7491%2C42875%2C1732467247444.1732467247844 with entries=310, filesize=307.89 KB; new WAL /user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/WALs/e335232f7491,42875,1732467247444/e335232f7491%2C42875%2C1732467247444.1732467335354
2024-11-24T16:55:35,371 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38963:38963),(127.0.0.1/127.0.0.1:37491:37491)]
2024-11-24T16:55:35,371 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/WALs/e335232f7491,42875,1732467247444/e335232f7491%2C42875%2C1732467247444.1732467247844 is not closed yet, will try archiving it next time
2024-11-24T16:55:35,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741833_1009 (size=315283)
2024-11-24T16:55:35,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741833_1009 (size=315283)
2024-11-24T16:55:35,374 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for a77fea3a556068705b99707d91aa713b:
2024-11-24T16:55:35,374 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB
2024-11-24T16:55:35,375 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/WALs/e335232f7491,42875,1732467247444/e335232f7491%2C42875%2C1732467247444.1732467247844 to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/oldWALs/e335232f7491%2C42875%2C1732467247444.1732467247844
2024-11-24T16:55:35,378 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/.tmp/info/a00ff4dfe542429883d25707f6d4beb2 is 186, key is TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b./info:regioninfo/1732467273562/Put/seqid=0
2024-11-24T16:55:35,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741875_1051 (size=6153)
2024-11-24T16:55:35,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741875_1051 (size=6153)
2024-11-24T16:55:35,382 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/.tmp/info/a00ff4dfe542429883d25707f6d4beb2
2024-11-24T16:55:35,387 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/.tmp/info/a00ff4dfe542429883d25707f6d4beb2 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/info/a00ff4dfe542429883d25707f6d4beb2
2024-11-24T16:55:35,391 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/info/a00ff4dfe542429883d25707f6d4beb2, entries=5, sequenceid=21, filesize=6.0 K
2024-11-24T16:55:35,392 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 18ms, sequenceid=21, compaction requested=false
2024-11-24T16:55:35,392 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740:
2024-11-24T16:55:35,392 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for b027cc4d524f38ce823f856963ba6e3a:
2024-11-24T16:55:35,393 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1])
2024-11-24T16:55:35,393 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-24T16:55:35,393 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-24T16:55:35,393 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:55:35,393 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:55:35,393 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:55:35,393 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-24T16:55:35,393 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T16:55:35,393 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=744538088, stopped=false 2024-11-24T16:55:35,393 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e335232f7491,35783,1732467247387 2024-11-24T16:55:35,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T16:55:35,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T16:55:35,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:55:35,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:55:35,395 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T16:55:35,395 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T16:55:35,395 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:55:35,395 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:55:35,395 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:55:35,395 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:55:35,396 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e335232f7491,42875,1732467247444' ***** 2024-11-24T16:55:35,396 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T16:55:35,396 INFO [RS:0;e335232f7491:42875 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T16:55:35,396 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T16:55:35,396 INFO [RS:0;e335232f7491:42875 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T16:55:35,396 INFO [RS:0;e335232f7491:42875 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-24T16:55:35,396 INFO [RS:0;e335232f7491:42875 {}] regionserver.HRegionServer(3091): Received CLOSE for a77fea3a556068705b99707d91aa713b 2024-11-24T16:55:35,396 INFO [RS:0;e335232f7491:42875 {}] regionserver.HRegionServer(3091): Received CLOSE for b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:55:35,396 INFO [RS:0;e335232f7491:42875 {}] regionserver.HRegionServer(959): stopping server e335232f7491,42875,1732467247444 2024-11-24T16:55:35,396 INFO [RS:0;e335232f7491:42875 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T16:55:35,396 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a77fea3a556068705b99707d91aa713b, disabling compactions & flushes 2024-11-24T16:55:35,396 INFO [RS:0;e335232f7491:42875 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e335232f7491:42875. 2024-11-24T16:55:35,397 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b. 2024-11-24T16:55:35,397 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b. 2024-11-24T16:55:35,397 DEBUG [RS:0;e335232f7491:42875 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:55:35,397 DEBUG [RS:0;e335232f7491:42875 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:55:35,397 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b. after waiting 0 ms 2024-11-24T16:55:35,397 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b. 2024-11-24T16:55:35,397 INFO [RS:0;e335232f7491:42875 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-24T16:55:35,397 INFO [RS:0;e335232f7491:42875 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T16:55:35,397 INFO [RS:0;e335232f7491:42875 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T16:55:35,397 INFO [RS:0;e335232f7491:42875 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T16:55:35,397 INFO [RS:0;e335232f7491:42875 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-24T16:55:35,397 DEBUG [RS:0;e335232f7491:42875 {}] regionserver.HRegionServer(1325): Online Regions={a77fea3a556068705b99707d91aa713b=TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b., 1588230740=hbase:meta,,1.1588230740, b027cc4d524f38ce823f856963ba6e3a=TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.} 2024-11-24T16:55:35,397 DEBUG [RS:0;e335232f7491:42875 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a77fea3a556068705b99707d91aa713b, b027cc4d524f38ce823f856963ba6e3a 2024-11-24T16:55:35,397 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T16:55:35,397 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T16:55:35,397 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T16:55:35,397 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T16:55:35,397 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T16:55:35,397 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/a77fea3a556068705b99707d91aa713b/info/7e5e28a7334846a9a5b63f127ba2b343.e3ca43f3124bd9e44ae59b22d0fde460->hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/7e5e28a7334846a9a5b63f127ba2b343-bottom] to archive 2024-11-24T16:55:35,398 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-24T16:55:35,400 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/a77fea3a556068705b99707d91aa713b/info/7e5e28a7334846a9a5b63f127ba2b343.e3ca43f3124bd9e44ae59b22d0fde460 to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/a77fea3a556068705b99707d91aa713b/info/7e5e28a7334846a9a5b63f127ba2b343.e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:55:35,400 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e335232f7491:35783 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-24T16:55:35,400 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-24T16:55:35,401 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-24T16:55:35,402 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T16:55:35,402 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T16:55:35,402 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732467335397Running coprocessor pre-close hooks at 1732467335397Disabling compacts and flushes for region at 1732467335397Disabling writes for close at 1732467335397Writing region close event to WAL at 1732467335398 (+1 ms)Running coprocessor post-close hooks at 1732467335402 (+4 ms)Closed at 1732467335402 2024-11-24T16:55:35,402 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T16:55:35,403 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/a77fea3a556068705b99707d91aa713b/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-24T16:55:35,404 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b. 
2024-11-24T16:55:35,404 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a77fea3a556068705b99707d91aa713b: Waiting for close lock at 1732467335396Running coprocessor pre-close hooks at 1732467335396Disabling compacts and flushes for region at 1732467335396Disabling writes for close at 1732467335397 (+1 ms)Writing region close event to WAL at 1732467335400 (+3 ms)Running coprocessor post-close hooks at 1732467335404 (+4 ms)Closed at 1732467335404
2024-11-24T16:55:35,404 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732467272820.a77fea3a556068705b99707d91aa713b.
2024-11-24T16:55:35,404 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b027cc4d524f38ce823f856963ba6e3a, disabling compactions & flushes
2024-11-24T16:55:35,404 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.
2024-11-24T16:55:35,404 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.
2024-11-24T16:55:35,404 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a. after waiting 0 ms
2024-11-24T16:55:35,404 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.
2024-11-24T16:55:35,405 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/7e5e28a7334846a9a5b63f127ba2b343.e3ca43f3124bd9e44ae59b22d0fde460->hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/e3ca43f3124bd9e44ae59b22d0fde460/info/7e5e28a7334846a9a5b63f127ba2b343-top, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-f8eed75c65454bc4b9b194c983b6fa00, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-b05046857f9d4081b75c3401e51d47c8, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/f15a29a26ebe4e409ba4537c2c63ebae, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-a8380a2054e74188bc0faa3add0b983b, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/a5b9dc39e8624d11a672df413534f74f, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/8898740904934da5a0a8d6ad5e748b47, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/e2df136d60b440fe9da377f44550f2e3, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/71da3c3fddcb46e2a25ffc2658827f0e, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/c65edbccc0e04d859404491b6e885f63, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/fb331ffc421742099bf3c3a02b172aca, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/0c08246219aa4a5ca301081fdaf6aa1b, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/8450b6cce8cd4c35b2ed376cfd6d4195, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/10956faa64ec49968ff1a8764e1708a7, 
hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/b44937d5817d4c9fbe0e8d982db8b90a, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/df1b6ad98def42189f6139c3a42e4ef5, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/bbee5ef38103473cb7d295b590e9afc0, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/b418f5935acc44c795a64e1dddd94e7e, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/e0c80c0ba50f4d01ae00b4c6012aedac, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/943f1b7d12b543fbb35fdb193a625dd6, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/e805b51caa3d4cb68099a78c20c7bd1f, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/b0f70dfc59d04b2ebbcf4316b9813760] to archive 2024-11-24T16:55:35,405 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-24T16:55:35,407 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/7e5e28a7334846a9a5b63f127ba2b343.e3ca43f3124bd9e44ae59b22d0fde460 to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/7e5e28a7334846a9a5b63f127ba2b343.e3ca43f3124bd9e44ae59b22d0fde460 2024-11-24T16:55:35,408 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-f8eed75c65454bc4b9b194c983b6fa00 to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-f8eed75c65454bc4b9b194c983b6fa00 2024-11-24T16:55:35,409 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-b05046857f9d4081b75c3401e51d47c8 to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-b05046857f9d4081b75c3401e51d47c8 2024-11-24T16:55:35,410 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/f15a29a26ebe4e409ba4537c2c63ebae to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/f15a29a26ebe4e409ba4537c2c63ebae 2024-11-24T16:55:35,411 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-a8380a2054e74188bc0faa3add0b983b to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/TestLogRolling-testLogRolling=e3ca43f3124bd9e44ae59b22d0fde460-a8380a2054e74188bc0faa3add0b983b 2024-11-24T16:55:35,412 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/a5b9dc39e8624d11a672df413534f74f to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/a5b9dc39e8624d11a672df413534f74f 2024-11-24T16:55:35,413 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/8898740904934da5a0a8d6ad5e748b47 to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/8898740904934da5a0a8d6ad5e748b47 2024-11-24T16:55:35,414 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/e2df136d60b440fe9da377f44550f2e3 to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/e2df136d60b440fe9da377f44550f2e3 2024-11-24T16:55:35,415 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/71da3c3fddcb46e2a25ffc2658827f0e to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/71da3c3fddcb46e2a25ffc2658827f0e 2024-11-24T16:55:35,416 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/c65edbccc0e04d859404491b6e885f63 to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/c65edbccc0e04d859404491b6e885f63 2024-11-24T16:55:35,417 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/fb331ffc421742099bf3c3a02b172aca to 
hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/fb331ffc421742099bf3c3a02b172aca 2024-11-24T16:55:35,418 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/0c08246219aa4a5ca301081fdaf6aa1b to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/0c08246219aa4a5ca301081fdaf6aa1b 2024-11-24T16:55:35,419 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/8450b6cce8cd4c35b2ed376cfd6d4195 to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/8450b6cce8cd4c35b2ed376cfd6d4195 2024-11-24T16:55:35,420 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/10956faa64ec49968ff1a8764e1708a7 to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/10956faa64ec49968ff1a8764e1708a7 2024-11-24T16:55:35,421 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/b44937d5817d4c9fbe0e8d982db8b90a to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/b44937d5817d4c9fbe0e8d982db8b90a 2024-11-24T16:55:35,422 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/df1b6ad98def42189f6139c3a42e4ef5 to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/df1b6ad98def42189f6139c3a42e4ef5 2024-11-24T16:55:35,423 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/bbee5ef38103473cb7d295b590e9afc0 to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/bbee5ef38103473cb7d295b590e9afc0 2024-11-24T16:55:35,424 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/b418f5935acc44c795a64e1dddd94e7e to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/b418f5935acc44c795a64e1dddd94e7e 2024-11-24T16:55:35,425 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/e0c80c0ba50f4d01ae00b4c6012aedac to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/e0c80c0ba50f4d01ae00b4c6012aedac 2024-11-24T16:55:35,425 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/943f1b7d12b543fbb35fdb193a625dd6 to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/943f1b7d12b543fbb35fdb193a625dd6 2024-11-24T16:55:35,426 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/e805b51caa3d4cb68099a78c20c7bd1f to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/e805b51caa3d4cb68099a78c20c7bd1f 2024-11-24T16:55:35,427 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/b0f70dfc59d04b2ebbcf4316b9813760 to hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/archive/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/info/b0f70dfc59d04b2ebbcf4316b9813760 2024-11-24T16:55:35,427 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a.-1 {}] 
regionserver.HStore(2414): Failed to report archival of files: [f15a29a26ebe4e409ba4537c2c63ebae=43081, a5b9dc39e8624d11a672df413534f74f=12516, 8898740904934da5a0a8d6ad5e748b47=66967, e2df136d60b440fe9da377f44550f2e3=21156, 71da3c3fddcb46e2a25ffc2658827f0e=16828, c65edbccc0e04d859404491b6e885f63=95174, fb331ffc421742099bf3c3a02b172aca=21156, 0c08246219aa4a5ca301081fdaf6aa1b=21156, 8450b6cce8cd4c35b2ed376cfd6d4195=118996, 10956faa64ec49968ff1a8764e1708a7=12516, b44937d5817d4c9fbe0e8d982db8b90a=29784, df1b6ad98def42189f6139c3a42e4ef5=151643, bbee5ef38103473cb7d295b590e9afc0=12516, b418f5935acc44c795a64e1dddd94e7e=29807, e0c80c0ba50f4d01ae00b4c6012aedac=184123, 943f1b7d12b543fbb35fdb193a625dd6=12523, e805b51caa3d4cb68099a78c20c7bd1f=20092, b0f70dfc59d04b2ebbcf4316b9813760=22254] 2024-11-24T16:55:35,431 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/data/default/TestLogRolling-testLogRolling/b027cc4d524f38ce823f856963ba6e3a/recovered.edits/333.seqid, newMaxSeqId=333, maxSeqId=126 2024-11-24T16:55:35,431 INFO [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a. 2024-11-24T16:55:35,431 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b027cc4d524f38ce823f856963ba6e3a: Waiting for close lock at 1732467335404Running coprocessor pre-close hooks at 1732467335404Disabling compacts and flushes for region at 1732467335404Disabling writes for close at 1732467335404Writing region close event to WAL at 1732467335428 (+24 ms)Running coprocessor post-close hooks at 1732467335431 (+3 ms)Closed at 1732467335431 2024-11-24T16:55:35,431 DEBUG [RS_CLOSE_REGION-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732467272820.b027cc4d524f38ce823f856963ba6e3a. 2024-11-24T16:55:35,597 INFO [RS:0;e335232f7491:42875 {}] regionserver.HRegionServer(976): stopping server e335232f7491,42875,1732467247444; all regions closed. 
2024-11-24T16:55:35,598 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:35,598 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:35,598 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:35,598 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:35,598 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:35,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741834_1010 (size=8107) 2024-11-24T16:55:35,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741834_1010 (size=8107) 2024-11-24T16:55:35,607 DEBUG [RS:0;e335232f7491:42875 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/oldWALs 2024-11-24T16:55:35,607 INFO [RS:0;e335232f7491:42875 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e335232f7491%2C42875%2C1732467247444.meta:.meta(num 1732467248245) 2024-11-24T16:55:35,607 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:35,608 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:35,608 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:35,608 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:35,608 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:35,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741874_1050 (size=780) 2024-11-24T16:55:35,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741874_1050 (size=780) 2024-11-24T16:55:35,612 DEBUG [RS:0;e335232f7491:42875 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/oldWALs 2024-11-24T16:55:35,612 INFO [RS:0;e335232f7491:42875 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e335232f7491%2C42875%2C1732467247444:(num 1732467335354) 2024-11-24T16:55:35,612 DEBUG [RS:0;e335232f7491:42875 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:55:35,612 INFO [RS:0;e335232f7491:42875 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T16:55:35,612 INFO [RS:0;e335232f7491:42875 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T16:55:35,612 INFO [RS:0;e335232f7491:42875 {}] hbase.ChoreService(370): Chore service for: regionserver/e335232f7491:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T16:55:35,612 INFO [RS:0;e335232f7491:42875 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T16:55:35,613 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T16:55:35,613 INFO [RS:0;e335232f7491:42875 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42875 2024-11-24T16:55:35,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e335232f7491,42875,1732467247444 2024-11-24T16:55:35,615 INFO [RS:0;e335232f7491:42875 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T16:55:35,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T16:55:35,616 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e335232f7491,42875,1732467247444] 2024-11-24T16:55:35,619 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e335232f7491,42875,1732467247444 already deleted, retry=false 2024-11-24T16:55:35,619 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e335232f7491,42875,1732467247444 expired; onlineServers=0 2024-11-24T16:55:35,619 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e335232f7491,35783,1732467247387' ***** 2024-11-24T16:55:35,619 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T16:55:35,619 INFO [M:0;e335232f7491:35783 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T16:55:35,619 INFO [M:0;e335232f7491:35783 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T16:55:35,619 DEBUG [M:0;e335232f7491:35783 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T16:55:35,620 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T16:55:35,620 DEBUG [M:0;e335232f7491:35783 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T16:55:35,620 DEBUG [master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467247637 {}] cleaner.HFileCleaner(306): Exit Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467247637,5,FailOnTimeoutGroup] 2024-11-24T16:55:35,620 DEBUG [master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467247638 {}] cleaner.HFileCleaner(306): Exit Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467247638,5,FailOnTimeoutGroup] 2024-11-24T16:55:35,620 INFO [M:0;e335232f7491:35783 {}] hbase.ChoreService(370): Chore service for: master/e335232f7491:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T16:55:35,620 INFO [M:0;e335232f7491:35783 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T16:55:35,620 DEBUG [M:0;e335232f7491:35783 {}] master.HMaster(1795): Stopping service threads 2024-11-24T16:55:35,620 INFO [M:0;e335232f7491:35783 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T16:55:35,620 INFO [M:0;e335232f7491:35783 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T16:55:35,620 INFO [M:0;e335232f7491:35783 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T16:55:35,620 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T16:55:35,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T16:55:35,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:55:35,623 DEBUG [M:0;e335232f7491:35783 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-24T16:55:35,623 DEBUG [M:0;e335232f7491:35783 {}] master.ActiveMasterManager(353): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-24T16:55:35,623 INFO [M:0;e335232f7491:35783 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/.lastflushedseqids 2024-11-24T16:55:35,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741876_1052 (size=228) 2024-11-24T16:55:35,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741876_1052 (size=228) 2024-11-24T16:55:35,630 INFO [M:0;e335232f7491:35783 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T16:55:35,630 INFO [M:0;e335232f7491:35783 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T16:55:35,630 DEBUG [M:0;e335232f7491:35783 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 
2024-11-24T16:55:35,630 INFO [M:0;e335232f7491:35783 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:55:35,630 DEBUG [M:0;e335232f7491:35783 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:55:35,630 DEBUG [M:0;e335232f7491:35783 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T16:55:35,630 DEBUG [M:0;e335232f7491:35783 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:55:35,630 INFO [M:0;e335232f7491:35783 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=53.71 KB heapSize=65.93 KB 2024-11-24T16:55:35,653 DEBUG [M:0;e335232f7491:35783 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/34d752853c9744529857a580524aacd6 is 82, key is hbase:meta,,1/info:regioninfo/1732467248269/Put/seqid=0 2024-11-24T16:55:35,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741877_1053 (size=5672) 2024-11-24T16:55:35,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741877_1053 (size=5672) 2024-11-24T16:55:35,659 INFO [M:0;e335232f7491:35783 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/34d752853c9744529857a580524aacd6 2024-11-24T16:55:35,681 DEBUG [M:0;e335232f7491:35783 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/068728c6adc1404aa645c721deb853ed is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732467248743/Put/seqid=0 2024-11-24T16:55:35,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741878_1054 (size=7681) 2024-11-24T16:55:35,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741878_1054 (size=7681) 2024-11-24T16:55:35,692 INFO [M:0;e335232f7491:35783 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.11 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/068728c6adc1404aa645c721deb853ed 2024-11-24T16:55:35,699 INFO [M:0;e335232f7491:35783 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 068728c6adc1404aa645c721deb853ed 2024-11-24T16:55:35,708 INFO [regionserver/e335232f7491:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T16:55:35,716 DEBUG [M:0;e335232f7491:35783 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0feae8eb49544ea4b315d6857902526a is 69, key is e335232f7491,42875,1732467247444/rs:state/1732467247690/Put/seqid=0 2024-11-24T16:55:35,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:55:35,716 INFO [RS:0;e335232f7491:42875 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T16:55:35,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42875-0x1014ae633990001, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:55:35,716 INFO [RS:0;e335232f7491:42875 {}] regionserver.HRegionServer(1031): Exiting; stopping=e335232f7491,42875,1732467247444; zookeeper connection closed. 2024-11-24T16:55:35,717 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3fb134e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3fb134e 2024-11-24T16:55:35,717 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T16:55:35,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741879_1055 (size=5156) 2024-11-24T16:55:35,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741879_1055 (size=5156) 2024-11-24T16:55:35,721 INFO [M:0;e335232f7491:35783 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0feae8eb49544ea4b315d6857902526a 2024-11-24T16:55:35,741 DEBUG [M:0;e335232f7491:35783 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9c30aa0ff17f44c09504846ba0009779 is 52, key is load_balancer_on/state:d/1732467248371/Put/seqid=0 2024-11-24T16:55:35,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741880_1056 (size=5056) 2024-11-24T16:55:35,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741880_1056 (size=5056) 2024-11-24T16:55:35,748 INFO [M:0;e335232f7491:35783 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9c30aa0ff17f44c09504846ba0009779 2024-11-24T16:55:35,753 DEBUG [M:0;e335232f7491:35783 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/34d752853c9744529857a580524aacd6 as 
hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/34d752853c9744529857a580524aacd6 2024-11-24T16:55:35,758 INFO [M:0;e335232f7491:35783 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/34d752853c9744529857a580524aacd6, entries=8, sequenceid=129, filesize=5.5 K 2024-11-24T16:55:35,759 DEBUG [M:0;e335232f7491:35783 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/068728c6adc1404aa645c721deb853ed as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/068728c6adc1404aa645c721deb853ed 2024-11-24T16:55:35,763 INFO [M:0;e335232f7491:35783 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 068728c6adc1404aa645c721deb853ed 2024-11-24T16:55:35,763 INFO [M:0;e335232f7491:35783 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/068728c6adc1404aa645c721deb853ed, entries=14, sequenceid=129, filesize=7.5 K 2024-11-24T16:55:35,764 DEBUG [M:0;e335232f7491:35783 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0feae8eb49544ea4b315d6857902526a as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0feae8eb49544ea4b315d6857902526a 2024-11-24T16:55:35,769 INFO [M:0;e335232f7491:35783 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0feae8eb49544ea4b315d6857902526a, entries=1, sequenceid=129, filesize=5.0 K 2024-11-24T16:55:35,769 DEBUG [M:0;e335232f7491:35783 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9c30aa0ff17f44c09504846ba0009779 as hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9c30aa0ff17f44c09504846ba0009779 2024-11-24T16:55:35,774 INFO [M:0;e335232f7491:35783 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39069/user/jenkins/test-data/9cb65926-8ff6-7f4a-d54e-86cf601e4682/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9c30aa0ff17f44c09504846ba0009779, entries=1, sequenceid=129, filesize=4.9 K 2024-11-24T16:55:35,775 INFO [M:0;e335232f7491:35783 {}] regionserver.HRegion(3140): Finished flush of dataSize ~53.71 KB/54997, heapSize ~65.87 KB/67448, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=129, compaction requested=false 2024-11-24T16:55:35,777 INFO [M:0;e335232f7491:35783 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T16:55:35,777 DEBUG [M:0;e335232f7491:35783 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732467335630Disabling compacts and flushes for region at 1732467335630Disabling writes for close at 1732467335630Obtaining lock to block concurrent updates at 1732467335630Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732467335630Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=54997, getHeapSize=67448, getOffHeapSize=0, getCellsCount=152 at 1732467335631 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732467335631Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732467335631Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732467335653 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732467335653Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732467335664 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732467335680 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732467335680Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732467335699 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732467335715 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732467335715Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732467335726 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732467335741 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732467335741Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79faac70: reopening flushed file at 1732467335753 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d11775f: reopening flushed file at 1732467335758 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e5f27d2: reopening flushed file at 1732467335764 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e70ced: reopening flushed file at 1732467335769 (+5 ms)Finished flush of dataSize ~53.71 KB/54997, heapSize ~65.87 KB/67448, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=129, compaction requested=false at 1732467335775 (+6 ms)Writing region close event to WAL at 1732467335777 (+2 ms)Closed at 1732467335777 2024-11-24T16:55:35,777 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:35,778 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:35,778 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:35,778 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:35,778 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:35,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40787 is added to blk_1073741830_1006 (size=63927) 2024-11-24T16:55:35,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43271 is added to blk_1073741830_1006 (size=63927) 2024-11-24T16:55:35,783 INFO [M:0;e335232f7491:35783 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-24T16:55:35,783 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T16:55:35,783 INFO [M:0;e335232f7491:35783 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35783 2024-11-24T16:55:35,783 INFO [M:0;e335232f7491:35783 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T16:55:35,886 INFO [M:0;e335232f7491:35783 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T16:55:35,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:55:35,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35783-0x1014ae633990000, quorum=127.0.0.1:57206, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:55:35,889 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@629cd82f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:55:35,889 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f25c1cb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:55:35,889 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:55:35,889 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b28e221{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:55:35,890 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ff7780b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/hadoop.log.dir/,STOPPED} 2024-11-24T16:55:35,891 WARN [BP-963785250-172.17.0.2-1732467246566 heartbeating to localhost/127.0.0.1:39069 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:55:35,891 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T16:55:35,891 WARN [BP-963785250-172.17.0.2-1732467246566 heartbeating to localhost/127.0.0.1:39069 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-963785250-172.17.0.2-1732467246566 (Datanode Uuid 3deceb80-ed62-436c-8091-5ead47cbf088) service to localhost/127.0.0.1:39069 2024-11-24T16:55:35,891 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:55:35,892 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/cluster_708ac12a-52ff-d10f-32fa-4b9804a92436/data/data3/current/BP-963785250-172.17.0.2-1732467246566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:55:35,892 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/cluster_708ac12a-52ff-d10f-32fa-4b9804a92436/data/data4/current/BP-963785250-172.17.0.2-1732467246566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:55:35,892 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:55:35,894 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@15c8c411{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:55:35,894 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6d6e0cf8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:55:35,894 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:55:35,894 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79864455{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:55:35,894 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bd9a438{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/hadoop.log.dir/,STOPPED} 2024-11-24T16:55:35,896 WARN [BP-963785250-172.17.0.2-1732467246566 heartbeating to localhost/127.0.0.1:39069 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:55:35,896 WARN [BP-963785250-172.17.0.2-1732467246566 heartbeating to localhost/127.0.0.1:39069 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-963785250-172.17.0.2-1732467246566 (Datanode Uuid c3a4c52f-cf37-4df8-b529-238189abe78b) service to localhost/127.0.0.1:39069 2024-11-24T16:55:35,896 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T16:55:35,896 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:55:35,896 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/cluster_708ac12a-52ff-d10f-32fa-4b9804a92436/data/data1/current/BP-963785250-172.17.0.2-1732467246566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:55:35,897 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/cluster_708ac12a-52ff-d10f-32fa-4b9804a92436/data/data2/current/BP-963785250-172.17.0.2-1732467246566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:55:35,897 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:55:35,905 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@152e40b0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T16:55:35,906 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@687d9ffd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:55:35,906 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:55:35,906 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27fb1a0a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:55:35,906 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25570184{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/hadoop.log.dir/,STOPPED} 2024-11-24T16:55:35,913 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T16:55:35,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T16:55:35,948 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=227 (was 206) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39069 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39069 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39069 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39069 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39069 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:39069 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39069 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39069 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39069 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? 
-, OpenFileDescriptor=506 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=180 (was 210), ProcessCount=11 (was 11), AvailableMemoryMB=5408 (was 5702) 2024-11-24T16:55:35,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:35,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:35,956 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=227, OpenFileDescriptor=506, MaxFileDescriptor=1048576, SystemLoadAverage=180, ProcessCount=11, AvailableMemoryMB=5408 2024-11-24T16:55:35,956 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T16:55:35,956 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/hadoop.log.dir so I do NOT create it in target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f 2024-11-24T16:55:35,957 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3fad447f-9f3a-a304-17fd-c1a416fe545f/hadoop.tmp.dir so I do NOT create it in target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f 2024-11-24T16:55:35,957 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/cluster_885baf13-47c9-7bb1-263b-860f7324909b, deleteOnExit=true 2024-11-24T16:55:35,957 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T16:55:35,957 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/test.cache.data in system properties and HBase conf 2024-11-24T16:55:35,957 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T16:55:35,957 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/hadoop.log.dir in system properties and HBase conf 2024-11-24T16:55:35,957 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T16:55:35,957 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T16:55:35,957 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T16:55:35,957 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-24T16:55:35,957 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T16:55:35,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T16:55:35,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T16:55:35,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T16:55:35,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T16:55:35,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T16:55:35,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T16:55:35,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T16:55:35,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T16:55:35,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/nfs.dump.dir in system properties and HBase conf 2024-11-24T16:55:35,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/java.io.tmpdir in system properties and HBase conf 2024-11-24T16:55:35,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T16:55:35,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T16:55:35,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T16:55:35,971 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T16:55:36,024 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:55:36,027 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:55:36,029 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:55:36,029 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:55:36,029 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:55:36,029 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:55:36,030 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d7a0e2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:55:36,030 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32d01bcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:55:36,142 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d175de5{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/java.io.tmpdir/jetty-localhost-40017-hadoop-hdfs-3_4_1-tests_jar-_-any-18317745012596528967/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T16:55:36,143 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@51af64d5{HTTP/1.1, (http/1.1)}{localhost:40017} 2024-11-24T16:55:36,143 INFO [Time-limited test {}] server.Server(415): Started @327105ms 2024-11-24T16:55:36,155 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T16:55:36,208 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:55:36,210 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:55:36,211 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:55:36,211 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:55:36,211 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T16:55:36,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24bb5ef7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:55:36,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c8a9439{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:55:36,326 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@483384c3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/java.io.tmpdir/jetty-localhost-40245-hadoop-hdfs-3_4_1-tests_jar-_-any-12086358182303477505/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:55:36,326 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e70e237{HTTP/1.1, (http/1.1)}{localhost:40245} 2024-11-24T16:55:36,326 INFO [Time-limited test {}] server.Server(415): Started @327288ms 2024-11-24T16:55:36,327 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T16:55:36,355 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T16:55:36,357 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T16:55:36,358 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T16:55:36,358 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T16:55:36,358 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T16:55:36,359 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@238bf9b3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/hadoop.log.dir/,AVAILABLE} 2024-11-24T16:55:36,359 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@814e400{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T16:55:36,431 WARN [Thread-2471 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/cluster_885baf13-47c9-7bb1-263b-860f7324909b/data/data1/current/BP-625031532-172.17.0.2-1732467335976/current, will proceed with Du for space computation calculation, 2024-11-24T16:55:36,431 WARN [Thread-2472 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/cluster_885baf13-47c9-7bb1-263b-860f7324909b/data/data2/current/BP-625031532-172.17.0.2-1732467335976/current, will proceed with Du for space computation calculation, 2024-11-24T16:55:36,458 WARN [Thread-2450 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T16:55:36,461 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6143c1cd7a56f743 with lease ID 0x5500557a6f94465b: Processing first storage report for DS-e2dd4110-fe16-4dea-8805-4578d96d7cc7 from datanode DatanodeRegistration(127.0.0.1:41413, datanodeUuid=6a18ff9c-c4bb-4f5e-8ab7-e7768707b593, infoPort=45823, infoSecurePort=0, ipcPort=45213, storageInfo=lv=-57;cid=testClusterID;nsid=38395460;c=1732467335976) 2024-11-24T16:55:36,461 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6143c1cd7a56f743 with lease ID 0x5500557a6f94465b: from storage DS-e2dd4110-fe16-4dea-8805-4578d96d7cc7 node DatanodeRegistration(127.0.0.1:41413, datanodeUuid=6a18ff9c-c4bb-4f5e-8ab7-e7768707b593, infoPort=45823, infoSecurePort=0, ipcPort=45213, storageInfo=lv=-57;cid=testClusterID;nsid=38395460;c=1732467335976), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:55:36,461 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6143c1cd7a56f743 with lease ID 0x5500557a6f94465b: Processing first storage report for DS-b05f6f3b-7d99-4978-a65d-e1646bf40dad from datanode DatanodeRegistration(127.0.0.1:41413, datanodeUuid=6a18ff9c-c4bb-4f5e-8ab7-e7768707b593, infoPort=45823, infoSecurePort=0, ipcPort=45213, storageInfo=lv=-57;cid=testClusterID;nsid=38395460;c=1732467335976) 2024-11-24T16:55:36,461 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6143c1cd7a56f743 with lease ID 0x5500557a6f94465b: from storage DS-b05f6f3b-7d99-4978-a65d-e1646bf40dad node DatanodeRegistration(127.0.0.1:41413, datanodeUuid=6a18ff9c-c4bb-4f5e-8ab7-e7768707b593, infoPort=45823, infoSecurePort=0, ipcPort=45213, storageInfo=lv=-57;cid=testClusterID;nsid=38395460;c=1732467335976), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:55:36,487 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@372f7d77{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/java.io.tmpdir/jetty-localhost-36747-hadoop-hdfs-3_4_1-tests_jar-_-any-5897557212582064125/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:55:36,487 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3ffef2a8{HTTP/1.1, (http/1.1)}{localhost:36747} 2024-11-24T16:55:36,488 INFO [Time-limited test {}] server.Server(415): Started @327450ms 2024-11-24T16:55:36,489 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T16:55:36,603 WARN [Thread-2497 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/cluster_885baf13-47c9-7bb1-263b-860f7324909b/data/data3/current/BP-625031532-172.17.0.2-1732467335976/current, will proceed with Du for space computation calculation, 2024-11-24T16:55:36,603 WARN [Thread-2498 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/cluster_885baf13-47c9-7bb1-263b-860f7324909b/data/data4/current/BP-625031532-172.17.0.2-1732467335976/current, will proceed with Du for space computation calculation, 2024-11-24T16:55:36,623 WARN [Thread-2486 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T16:55:36,628 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x64b71bf676dd38c9 with lease ID 0x5500557a6f94465c: Processing first storage report for DS-32344545-dd07-4c1a-a058-cdab7334f1a2 from datanode DatanodeRegistration(127.0.0.1:46391, datanodeUuid=0a9d6ffd-b736-4bb3-bc81-4672c793fc98, infoPort=46085, infoSecurePort=0, ipcPort=37439, storageInfo=lv=-57;cid=testClusterID;nsid=38395460;c=1732467335976) 2024-11-24T16:55:36,628 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64b71bf676dd38c9 with lease ID 0x5500557a6f94465c: from storage DS-32344545-dd07-4c1a-a058-cdab7334f1a2 node DatanodeRegistration(127.0.0.1:46391, datanodeUuid=0a9d6ffd-b736-4bb3-bc81-4672c793fc98, infoPort=46085, infoSecurePort=0, ipcPort=37439, storageInfo=lv=-57;cid=testClusterID;nsid=38395460;c=1732467335976), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:55:36,628 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x64b71bf676dd38c9 with lease ID 0x5500557a6f94465c: Processing first storage report for DS-e88c73df-ecd4-4c36-90ea-0cfbb615e32b from datanode DatanodeRegistration(127.0.0.1:46391, datanodeUuid=0a9d6ffd-b736-4bb3-bc81-4672c793fc98, infoPort=46085, infoSecurePort=0, ipcPort=37439, storageInfo=lv=-57;cid=testClusterID;nsid=38395460;c=1732467335976) 2024-11-24T16:55:36,628 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64b71bf676dd38c9 with lease ID 0x5500557a6f94465c: from storage DS-e88c73df-ecd4-4c36-90ea-0cfbb615e32b node DatanodeRegistration(127.0.0.1:46391, datanodeUuid=0a9d6ffd-b736-4bb3-bc81-4672c793fc98, infoPort=46085, infoSecurePort=0, ipcPort=37439, storageInfo=lv=-57;cid=testClusterID;nsid=38395460;c=1732467335976), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T16:55:36,723 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f 2024-11-24T16:55:36,725 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/cluster_885baf13-47c9-7bb1-263b-860f7324909b/zookeeper_0, clientPort=49509, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/cluster_885baf13-47c9-7bb1-263b-860f7324909b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/cluster_885baf13-47c9-7bb1-263b-860f7324909b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T16:55:36,726 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49509 2024-11-24T16:55:36,726 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:55:36,727 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:55:36,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41413 is added to blk_1073741825_1001 (size=7) 2024-11-24T16:55:36,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46391 is added to blk_1073741825_1001 (size=7) 2024-11-24T16:55:36,736 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2 with version=8 2024-11-24T16:55:36,736 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36099/user/jenkins/test-data/1ef50f84-f9d4-6bec-4101-64a7489c5d2b/hbase-staging 2024-11-24T16:55:36,738 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e335232f7491:0 server-side Connection retries=45 2024-11-24T16:55:36,738 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:55:36,738 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T16:55:36,738 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T16:55:36,738 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:55:36,739 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T16:55:36,739 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T16:55:36,739 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T16:55:36,739 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33317 2024-11-24T16:55:36,740 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33317 connecting to ZooKeeper ensemble=127.0.0.1:49509 2024-11-24T16:55:36,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:333170x0, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T16:55:36,747 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33317-0x1014ae790a40000 connected 2024-11-24T16:55:36,771 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:55:36,773 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:55:36,775 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:55:36,775 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2, hbase.cluster.distributed=false 2024-11-24T16:55:36,777 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T16:55:36,777 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33317 2024-11-24T16:55:36,780 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33317 2024-11-24T16:55:36,780 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33317 2024-11-24T16:55:36,781 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33317 2024-11-24T16:55:36,781 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33317 2024-11-24T16:55:36,795 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e335232f7491:0 server-side Connection retries=45 2024-11-24T16:55:36,795 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:55:36,796 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T16:55:36,796 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T16:55:36,796 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T16:55:36,796 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T16:55:36,796 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T16:55:36,796 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T16:55:36,796 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45543 2024-11-24T16:55:36,797 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45543 connecting to ZooKeeper ensemble=127.0.0.1:49509 2024-11-24T16:55:36,798 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:55:36,799 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:55:36,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:455430x0, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T16:55:36,805 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:455430x0, quorum=127.0.0.1:49509, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:55:36,805 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45543-0x1014ae790a40001 connected 2024-11-24T16:55:36,805 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T16:55:36,805 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T16:55:36,806 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T16:55:36,807 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T16:55:36,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45543 2024-11-24T16:55:36,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45543 2024-11-24T16:55:36,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45543 2024-11-24T16:55:36,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45543 2024-11-24T16:55:36,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45543 2024-11-24T16:55:36,823 
DEBUG [M:0;e335232f7491:33317 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e335232f7491:33317 2024-11-24T16:55:36,823 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e335232f7491,33317,1732467336738 2024-11-24T16:55:36,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:55:36,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:55:36,825 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e335232f7491,33317,1732467336738 2024-11-24T16:55:36,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T16:55:36,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:55:36,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:55:36,827 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T16:55:36,828 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e335232f7491,33317,1732467336738 from backup master directory 2024-11-24T16:55:36,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e335232f7491,33317,1732467336738 2024-11-24T16:55:36,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:55:36,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T16:55:36,829 WARN [master/e335232f7491:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T16:55:36,829 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e335232f7491,33317,1732467336738 2024-11-24T16:55:36,833 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/hbase.id] with ID: 0e6c2e8a-c3a5-4470-8293-4fe752239fee 2024-11-24T16:55:36,833 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/.tmp/hbase.id 2024-11-24T16:55:36,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41413 is added to blk_1073741826_1002 (size=42) 2024-11-24T16:55:36,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46391 is added to blk_1073741826_1002 (size=42) 2024-11-24T16:55:36,842 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/.tmp/hbase.id]:[hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/hbase.id] 2024-11-24T16:55:36,854 INFO [master/e335232f7491:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:55:36,854 INFO [master/e335232f7491:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T16:55:36,855 INFO [master/e335232f7491:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-11-24T16:55:36,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:55:36,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:55:36,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46391 is added to blk_1073741827_1003 (size=196) 2024-11-24T16:55:36,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41413 is added to blk_1073741827_1003 (size=196) 2024-11-24T16:55:36,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:36,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:37,266 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T16:55:37,267 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T16:55:37,267 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:55:37,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41413 is added to blk_1073741828_1004 (size=1189) 2024-11-24T16:55:37,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46391 is added to blk_1073741828_1004 (size=1189) 2024-11-24T16:55:37,276 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', 
TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store 2024-11-24T16:55:37,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41413 is added to blk_1073741829_1005 (size=34) 2024-11-24T16:55:37,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46391 is added to blk_1073741829_1005 (size=34) 2024-11-24T16:55:37,285 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:55:37,285 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T16:55:37,285 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:55:37,285 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:55:37,285 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T16:55:37,285 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:55:37,285 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
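The repeated "java.lang.reflect.InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" warnings above come from RecoverLeaseFSUtils probing whether the old WAL file is closed by invoking DistributedFileSystem.isFileClosed through reflection after the test's DFSClient has already been shut down. A minimal sketch of that reflective probe, using only the classes named in the stack trace; the surrounding error handling is illustrative, not HBase's actual implementation:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class IsFileClosedProbe {
      /** Asks HDFS whether a file is closed; falls back to false when the probe cannot run. */
      static boolean isFileClosed(FileSystem fs, Path path) {
        try {
          // DistributedFileSystem exposes isFileClosed(Path); looking it up
          // reflectively keeps the caller compilable against other FileSystems.
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException | IllegalAccessException e) {
          return false;
        } catch (InvocationTargetException e) {
          // The real failure (here "Filesystem closed") is the wrapped cause,
          // which is what the WARN lines above are reporting.
          return false;
        }
      }
    }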
2024-11-24T16:55:37,285 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732467337285Disabling compacts and flushes for region at 1732467337285Disabling writes for close at 1732467337285Writing region close event to WAL at 1732467337285Closed at 1732467337285 2024-11-24T16:55:37,286 WARN [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/.initializing 2024-11-24T16:55:37,286 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/WALs/e335232f7491,33317,1732467336738 2024-11-24T16:55:37,288 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C33317%2C1732467336738, suffix=, logDir=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/WALs/e335232f7491,33317,1732467336738, archiveDir=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/oldWALs, maxLogs=10 2024-11-24T16:55:37,288 INFO [master/e335232f7491:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C33317%2C1732467336738.1732467337288 2024-11-24T16:55:37,295 INFO [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/WALs/e335232f7491,33317,1732467336738/e335232f7491%2C33317%2C1732467336738.1732467337288 2024-11-24T16:55:37,299 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46085:46085),(127.0.0.1/127.0.0.1:45823:45823)] 2024-11-24T16:55:37,304 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:55:37,304 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:55:37,305 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:55:37,305 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:55:37,306 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:55:37,307 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T16:55:37,307 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:55:37,307 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:55:37,307 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:55:37,308 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T16:55:37,308 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:55:37,309 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:55:37,309 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:55:37,310 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T16:55:37,310 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:55:37,310 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:55:37,310 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:55:37,311 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T16:55:37,311 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:55:37,311 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T16:55:37,312 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:55:37,312 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:55:37,313 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:55:37,314 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:55:37,314 DEBUG [master/e335232f7491:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:55:37,314 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T16:55:37,315 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T16:55:37,317 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:55:37,317 INFO [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=846028, jitterRate=0.07578080892562866}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T16:55:37,318 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732467337305Initializing all the Stores at 1732467337305Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467337305Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467337305Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467337305Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467337305Cleaning up temporary data from old regions at 1732467337314 (+9 ms)Region opened successfully at 1732467337318 (+4 ms) 2024-11-24T16:55:37,318 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T16:55:37,321 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7954b951, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e335232f7491/172.17.0.2:0 2024-11-24T16:55:37,321 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T16:55:37,322 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T16:55:37,322 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T16:55:37,322 INFO [master/e335232f7491:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T16:55:37,322 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T16:55:37,322 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T16:55:37,322 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T16:55:37,330 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T16:55:37,331 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T16:55:37,333 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T16:55:37,333 INFO [master/e335232f7491:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T16:55:37,333 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T16:55:37,335 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T16:55:37,335 INFO [master/e335232f7491:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T16:55:37,336 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T16:55:37,337 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T16:55:37,338 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T16:55:37,339 DEBUG 
[master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T16:55:37,340 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T16:55:37,343 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T16:55:37,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T16:55:37,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T16:55:37,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:55:37,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:55:37,345 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e335232f7491,33317,1732467336738, sessionid=0x1014ae790a40000, setting cluster-up flag (Was=false) 2024-11-24T16:55:37,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:55:37,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:55:37,353 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T16:55:37,354 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e335232f7491,33317,1732467336738 2024-11-24T16:55:37,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:55:37,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:55:37,363 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T16:55:37,364 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e335232f7491,33317,1732467336738 2024-11-24T16:55:37,365 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T16:55:37,369 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T16:55:37,370 INFO [master/e335232f7491:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T16:55:37,370 INFO [master/e335232f7491:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T16:55:37,370 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e335232f7491,33317,1732467336738 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T16:55:37,371 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:55:37,371 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:55:37,371 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:55:37,371 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e335232f7491:0, corePoolSize=5, maxPoolSize=5 2024-11-24T16:55:37,371 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e335232f7491:0, corePoolSize=10, maxPoolSize=10 2024-11-24T16:55:37,371 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:55:37,371 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e335232f7491:0, corePoolSize=2, maxPoolSize=2 2024-11-24T16:55:37,371 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e335232f7491:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T16:55:37,372 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732467367372 2024-11-24T16:55:37,372 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T16:55:37,372 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T16:55:37,373 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T16:55:37,373 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T16:55:37,373 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T16:55:37,373 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T16:55:37,373 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T16:55:37,373 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:55:37,373 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T16:55:37,373 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T16:55:37,373 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T16:55:37,373 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T16:55:37,373 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T16:55:37,373 INFO [master/e335232f7491:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T16:55:37,374 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:55:37,374 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467337374,5,FailOnTimeoutGroup] 2024-11-24T16:55:37,374 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T16:55:37,374 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467337374,5,FailOnTimeoutGroup] 2024-11-24T16:55:37,374 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T16:55:37,374 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T16:55:37,374 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T16:55:37,375 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
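The ChoreService lines above (LogsCleaner every 600000 ms, HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms) are periodic background tasks scheduled on the master. Purely as an analogy in plain java.util.concurrent terms, not HBase's own ChoreService API, one such chore amounts to a fixed-delay task:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreAnalogy {
      public static void main(String[] args) {
        // One scheduler thread standing in for the master's chore service.
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        // "LogsCleaner, period=600000, unit=MILLISECONDS" from the log,
        // expressed as a plain fixed-delay task; the body is a placeholder.
        pool.scheduleWithFixedDelay(
            () -> System.out.println("scanning old WALs for expired files..."),
            0, 600_000, TimeUnit.MILLISECONDS);
      }
    }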
2024-11-24T16:55:37,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46391 is added to blk_1073741831_1007 (size=1321) 2024-11-24T16:55:37,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41413 is added to blk_1073741831_1007 (size=1321) 2024-11-24T16:55:37,387 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T16:55:37,387 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2 2024-11-24T16:55:37,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46391 is added to blk_1073741832_1008 (size=32) 2024-11-24T16:55:37,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41413 is added to blk_1073741832_1008 (size=32) 2024-11-24T16:55:37,397 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:55:37,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T16:55:37,402 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T16:55:37,402 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:55:37,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:55:37,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T16:55:37,403 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T16:55:37,403 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:55:37,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:55:37,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T16:55:37,405 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T16:55:37,405 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:55:37,406 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:55:37,406 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T16:55:37,407 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T16:55:37,407 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:55:37,408 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:55:37,408 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T16:55:37,409 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/data/hbase/meta/1588230740 2024-11-24T16:55:37,409 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/data/hbase/meta/1588230740 2024-11-24T16:55:37,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T16:55:37,411 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T16:55:37,411 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
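The FlushLargeStoresPolicy line just above computes the per-family flush lower bound as the region's memstore flush heap size divided by its number of column families; hbase:meta has four families (info, ns, rep_barrier, table), which is how it arrives at 16.0 M, matching the FlushLargeStoresPolicy{flushSizeLowerBound=16777216} value reported a few entries later. A one-line illustration of that arithmetic, where the 64 MB flush size is inferred from the reported result rather than stated in the log:

    // per-family lower bound = region memstore flush size / number of families
    long memStoreFlushHeapSize = 64L * 1024 * 1024; // assumed; yields the logged 16.0 M
    int  families = 4;                              // info, ns, rep_barrier, table
    long flushSizeLowerBound = memStoreFlushHeapSize / families; // 16777216 bytes = 16 MB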
2024-11-24T16:55:37,412 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T16:55:37,414 INFO [RS:0;e335232f7491:45543 {}] regionserver.HRegionServer(746): ClusterId : 0e6c2e8a-c3a5-4470-8293-4fe752239fee 2024-11-24T16:55:37,414 DEBUG [RS:0;e335232f7491:45543 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T16:55:37,417 DEBUG [RS:0;e335232f7491:45543 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T16:55:37,417 DEBUG [RS:0;e335232f7491:45543 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T16:55:37,419 DEBUG [RS:0;e335232f7491:45543 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T16:55:37,419 DEBUG [RS:0;e335232f7491:45543 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56a3d3b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e335232f7491/172.17.0.2:0 2024-11-24T16:55:37,421 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T16:55:37,421 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=863096, jitterRate=0.09748347103595734}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T16:55:37,422 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732467337397Initializing all the Stores at 1732467337398 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467337398Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467337400 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467337400Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467337400Cleaning up temporary data from old regions at 
1732467337411 (+11 ms)Region opened successfully at 1732467337422 (+11 ms) 2024-11-24T16:55:37,422 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T16:55:37,422 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T16:55:37,422 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T16:55:37,422 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T16:55:37,422 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T16:55:37,425 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T16:55:37,425 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732467337422Disabling compacts and flushes for region at 1732467337422Disabling writes for close at 1732467337422Writing region close event to WAL at 1732467337425 (+3 ms)Closed at 1732467337425 2024-11-24T16:55:37,426 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:55:37,426 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T16:55:37,426 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T16:55:37,428 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T16:55:37,429 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T16:55:37,437 DEBUG [RS:0;e335232f7491:45543 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e335232f7491:45543 2024-11-24T16:55:37,438 INFO [RS:0;e335232f7491:45543 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T16:55:37,438 INFO [RS:0;e335232f7491:45543 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T16:55:37,438 DEBUG [RS:0;e335232f7491:45543 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T16:55:37,438 INFO [RS:0;e335232f7491:45543 {}] regionserver.HRegionServer(2659): reportForDuty to master=e335232f7491,33317,1732467336738 with port=45543, startcode=1732467336795 2024-11-24T16:55:37,439 DEBUG [RS:0;e335232f7491:45543 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T16:55:37,441 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49457, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T16:55:37,441 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33317 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e335232f7491,45543,1732467336795 2024-11-24T16:55:37,441 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33317 {}] master.ServerManager(517): Registering regionserver=e335232f7491,45543,1732467336795 2024-11-24T16:55:37,442 DEBUG [RS:0;e335232f7491:45543 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2 2024-11-24T16:55:37,442 DEBUG [RS:0;e335232f7491:45543 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35337 2024-11-24T16:55:37,442 DEBUG [RS:0;e335232f7491:45543 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T16:55:37,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T16:55:37,444 DEBUG [RS:0;e335232f7491:45543 {}] zookeeper.ZKUtil(111): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e335232f7491,45543,1732467336795 2024-11-24T16:55:37,444 WARN [RS:0;e335232f7491:45543 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T16:55:37,445 INFO [RS:0;e335232f7491:45543 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:55:37,445 DEBUG [RS:0;e335232f7491:45543 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/WALs/e335232f7491,45543,1732467336795 2024-11-24T16:55:37,445 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e335232f7491,45543,1732467336795] 2024-11-24T16:55:37,451 INFO [RS:0;e335232f7491:45543 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T16:55:37,452 INFO [RS:0;e335232f7491:45543 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T16:55:37,452 INFO [RS:0;e335232f7491:45543 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T16:55:37,453 INFO [RS:0;e335232f7491:45543 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
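The ZKUtil and ZKWatcher entries above ("Set watcher on existing znode=/hbase/rs/...", "Received ZooKeeper Event, type=NodeChildrenChanged, ... path=/hbase/rs") are ordinary ZooKeeper watch notifications delivered as servers register themselves under the quorum at 127.0.0.1:49509. A minimal sketch of arming such a watch with the stock ZooKeeper client; the connect string, session timeout, and printed handling are placeholders, not values taken from this test:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("event " + event.getType() + " on " + event.getPath());
        // exists() both answers "is the znode there yet?" and arms a one-shot
        // watch, so later changes arrive as events such as the NodeCreated
        // and NodeChildrenChanged notifications logged above.
        zk.exists("/hbase/rs", watcher);
      }
    }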
2024-11-24T16:55:37,456 INFO [RS:0;e335232f7491:45543 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T16:55:37,457 INFO [RS:0;e335232f7491:45543 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T16:55:37,457 INFO [RS:0;e335232f7491:45543 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T16:55:37,458 DEBUG [RS:0;e335232f7491:45543 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:55:37,458 DEBUG [RS:0;e335232f7491:45543 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:55:37,458 DEBUG [RS:0;e335232f7491:45543 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:55:37,458 DEBUG [RS:0;e335232f7491:45543 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:55:37,458 DEBUG [RS:0;e335232f7491:45543 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:55:37,458 DEBUG [RS:0;e335232f7491:45543 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e335232f7491:0, corePoolSize=2, maxPoolSize=2 2024-11-24T16:55:37,458 DEBUG [RS:0;e335232f7491:45543 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:55:37,458 DEBUG [RS:0;e335232f7491:45543 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:55:37,458 DEBUG [RS:0;e335232f7491:45543 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:55:37,458 DEBUG [RS:0;e335232f7491:45543 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:55:37,458 DEBUG [RS:0;e335232f7491:45543 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:55:37,458 DEBUG [RS:0;e335232f7491:45543 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e335232f7491:0, corePoolSize=1, maxPoolSize=1 2024-11-24T16:55:37,458 DEBUG [RS:0;e335232f7491:45543 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e335232f7491:0, corePoolSize=3, maxPoolSize=3 2024-11-24T16:55:37,458 DEBUG [RS:0;e335232f7491:45543 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e335232f7491:0, corePoolSize=3, maxPoolSize=3 2024-11-24T16:55:37,460 INFO [RS:0;e335232f7491:45543 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
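Each "Starting executor service name=..., corePoolSize=N, maxPoolSize=N" line above describes a bounded thread pool dedicated to one kind of region server event (open region, close region, log replay, snapshot operations, and so on). A rough java.util.concurrent equivalent of one such pool, purely for illustration rather than HBase's own executor wrapper:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class RegionOpenPoolSketch {
      public static void main(String[] args) {
        // Mirrors "RS_OPEN_REGION ... corePoolSize=1, maxPoolSize=1": a single
        // worker thread draining a queue of open-region tasks.
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        openRegionPool.submit(() -> System.out.println("opening region..."));
        openRegionPool.shutdown();
      }
    }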
2024-11-24T16:55:37,460 INFO [RS:0;e335232f7491:45543 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T16:55:37,460 INFO [RS:0;e335232f7491:45543 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:55:37,460 INFO [RS:0;e335232f7491:45543 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T16:55:37,460 INFO [RS:0;e335232f7491:45543 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T16:55:37,460 INFO [RS:0;e335232f7491:45543 {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,45543,1732467336795-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T16:55:37,475 INFO [RS:0;e335232f7491:45543 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T16:55:37,476 INFO [RS:0;e335232f7491:45543 {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,45543,1732467336795-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:55:37,476 INFO [RS:0;e335232f7491:45543 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:55:37,476 INFO [RS:0;e335232f7491:45543 {}] regionserver.Replication(171): e335232f7491,45543,1732467336795 started 2024-11-24T16:55:37,489 INFO [RS:0;e335232f7491:45543 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:55:37,489 INFO [RS:0;e335232f7491:45543 {}] regionserver.HRegionServer(1482): Serving as e335232f7491,45543,1732467336795, RpcServer on e335232f7491/172.17.0.2:45543, sessionid=0x1014ae790a40001 2024-11-24T16:55:37,489 DEBUG [RS:0;e335232f7491:45543 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T16:55:37,489 DEBUG [RS:0;e335232f7491:45543 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e335232f7491,45543,1732467336795 2024-11-24T16:55:37,489 DEBUG [RS:0;e335232f7491:45543 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e335232f7491,45543,1732467336795' 2024-11-24T16:55:37,489 DEBUG [RS:0;e335232f7491:45543 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T16:55:37,490 DEBUG [RS:0;e335232f7491:45543 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T16:55:37,490 DEBUG [RS:0;e335232f7491:45543 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T16:55:37,490 DEBUG [RS:0;e335232f7491:45543 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T16:55:37,490 DEBUG [RS:0;e335232f7491:45543 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e335232f7491,45543,1732467336795 2024-11-24T16:55:37,490 DEBUG [RS:0;e335232f7491:45543 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e335232f7491,45543,1732467336795' 2024-11-24T16:55:37,490 DEBUG [RS:0;e335232f7491:45543 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T16:55:37,490 DEBUG 
[RS:0;e335232f7491:45543 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T16:55:37,491 DEBUG [RS:0;e335232f7491:45543 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T16:55:37,491 INFO [RS:0;e335232f7491:45543 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T16:55:37,491 INFO [RS:0;e335232f7491:45543 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T16:55:37,579 WARN [e335232f7491:33317 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T16:55:37,592 INFO [RS:0;e335232f7491:45543 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C45543%2C1732467336795, suffix=, logDir=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/WALs/e335232f7491,45543,1732467336795, archiveDir=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/oldWALs, maxLogs=32 2024-11-24T16:55:37,607 INFO [RS:0;e335232f7491:45543 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C45543%2C1732467336795.1732467337606 2024-11-24T16:55:37,613 INFO [RS:0;e335232f7491:45543 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/WALs/e335232f7491,45543,1732467336795/e335232f7491%2C45543%2C1732467336795.1732467337606 2024-11-24T16:55:37,618 DEBUG [RS:0;e335232f7491:45543 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46085:46085),(127.0.0.1/127.0.0.1:45823:45823)] 2024-11-24T16:55:37,829 DEBUG [e335232f7491:33317 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T16:55:37,830 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e335232f7491,45543,1732467336795 2024-11-24T16:55:37,831 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e335232f7491,45543,1732467336795, state=OPENING 2024-11-24T16:55:37,833 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T16:55:37,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:55:37,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:55:37,836 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T16:55:37,836 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:55:37,836 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=e335232f7491,45543,1732467336795}] 2024-11-24T16:55:37,836 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:55:37,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,42939,1732467114627/e335232f7491%2C42939%2C1732467114627.1732467114828 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:37,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37129/user/jenkins/test-data/341ffd9e-415b-5022-8387-e3fbc24fb8c8/WALs/e335232f7491,41569,1732467113673/e335232f7491%2C41569%2C1732467113673.meta.1732467114492.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T16:55:37,989 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T16:55:37,992 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39213, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T16:55:37,996 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T16:55:37,996 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:55:37,998 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e335232f7491%2C45543%2C1732467336795.meta, suffix=.meta, logDir=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/WALs/e335232f7491,45543,1732467336795, archiveDir=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/oldWALs, maxLogs=32 2024-11-24T16:55:37,999 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e335232f7491%2C45543%2C1732467336795.meta.1732467337999.meta 2024-11-24T16:55:38,016 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/WALs/e335232f7491,45543,1732467336795/e335232f7491%2C45543%2C1732467336795.meta.1732467337999.meta 2024-11-24T16:55:38,021 DEBUG 
[RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46085:46085),(127.0.0.1/127.0.0.1:45823:45823)] 2024-11-24T16:55:38,027 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T16:55:38,028 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T16:55:38,028 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T16:55:38,028 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-24T16:55:38,028 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T16:55:38,028 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T16:55:38,028 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T16:55:38,029 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T16:55:38,030 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T16:55:38,031 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T16:55:38,031 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:55:38,032 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:55:38,032 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T16:55:38,033 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T16:55:38,033 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:55:38,033 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:55:38,033 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T16:55:38,034 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T16:55:38,034 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:55:38,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:55:38,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T16:55:38,035 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T16:55:38,035 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T16:55:38,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T16:55:38,036 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T16:55:38,036 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/data/hbase/meta/1588230740 2024-11-24T16:55:38,037 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/data/hbase/meta/1588230740 2024-11-24T16:55:38,039 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T16:55:38,039 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T16:55:38,039 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
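The CompactionConfiguration records above report minCompactSize:128 MB, minFilesToCompact:3, maxFilesToCompact:10 and ratio 1.2 for each hbase:meta column family. A minimal sketch of how such values are usually supplied follows; the property keys are assumptions based on stock HBase defaults and are not read from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    // Assumed hbase-site.xml keys corresponding to the logged compaction values.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio 1.200000
  }
}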
2024-11-24T16:55:38,040 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T16:55:38,041 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=761669, jitterRate=-0.0314885675907135}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T16:55:38,041 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T16:55:38,042 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732467338029Writing region info on filesystem at 1732467338029Initializing all the Stores at 1732467338030 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467338030Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467338030Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732467338030Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732467338030Cleaning up temporary data from old regions at 1732467338039 (+9 ms)Running coprocessor post-open hooks at 1732467338041 (+2 ms)Region opened successfully at 1732467338042 (+1 ms) 2024-11-24T16:55:38,043 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732467337989 2024-11-24T16:55:38,045 DEBUG [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T16:55:38,045 INFO [RS_OPEN_META-regionserver/e335232f7491:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T16:55:38,046 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=e335232f7491,45543,1732467336795 2024-11-24T16:55:38,047 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e335232f7491,45543,1732467336795, state=OPEN 2024-11-24T16:55:38,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T16:55:38,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T16:55:38,053 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e335232f7491,45543,1732467336795 2024-11-24T16:55:38,053 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:55:38,053 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T16:55:38,055 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T16:55:38,055 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e335232f7491,45543,1732467336795 in 217 msec 2024-11-24T16:55:38,057 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T16:55:38,057 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 629 msec 2024-11-24T16:55:38,058 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T16:55:38,058 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T16:55:38,059 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T16:55:38,059 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e335232f7491,45543,1732467336795, seqNum=-1] 2024-11-24T16:55:38,060 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T16:55:38,061 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40475, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T16:55:38,065 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 696 msec 2024-11-24T16:55:38,065 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732467338065, completionTime=-1 2024-11-24T16:55:38,065 INFO 
[master/e335232f7491:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T16:55:38,065 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T16:55:38,067 INFO [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T16:55:38,067 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732467398067 2024-11-24T16:55:38,067 INFO [master/e335232f7491:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732467458067 2024-11-24T16:55:38,067 INFO [master/e335232f7491:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T16:55:38,067 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,33317,1732467336738-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T16:55:38,068 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,33317,1732467336738-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:55:38,068 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,33317,1732467336738-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:55:38,068 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e335232f7491:33317, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:55:38,068 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T16:55:38,069 DEBUG [master/e335232f7491:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T16:55:38,072 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T16:55:38,076 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.247sec 2024-11-24T16:55:38,076 INFO [master/e335232f7491:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T16:55:38,076 INFO [master/e335232f7491:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T16:55:38,076 INFO [master/e335232f7491:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T16:55:38,076 INFO [master/e335232f7491:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
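The "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" records above come from ChoreService scheduling periodic tasks on the master and region server. A minimal sketch of registering such a chore is shown below, assuming the public ScheduledChore(String, Stoppable, int) constructor; the chore name and period here are illustrative only.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) {
    // Simple stopper so the chore can be cancelled.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService choreService = new ChoreService("sketch");
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60000) {
      @Override protected void chore() {
        // periodic work runs here every 60000 ms
      }
    };
    choreService.scheduleChore(chore); // emits a "... is enabled." record like those above
  }
}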
2024-11-24T16:55:38,076 INFO [master/e335232f7491:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T16:55:38,077 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,33317,1732467336738-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T16:55:38,077 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,33317,1732467336738-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T16:55:38,079 DEBUG [master/e335232f7491:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T16:55:38,079 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T16:55:38,079 INFO [master/e335232f7491:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e335232f7491,33317,1732467336738-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T16:55:38,114 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@734134af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:55:38,114 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e335232f7491,33317,-1 for getting cluster id 2024-11-24T16:55:38,115 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T16:55:38,116 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0e6c2e8a-c3a5-4470-8293-4fe752239fee' 2024-11-24T16:55:38,117 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T16:55:38,117 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0e6c2e8a-c3a5-4470-8293-4fe752239fee" 2024-11-24T16:55:38,117 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@770fcd5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:55:38,117 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e335232f7491,33317,-1] 2024-11-24T16:55:38,117 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T16:55:38,118 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:55:38,119 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37556, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T16:55:38,120 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fa9d1ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T16:55:38,120 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T16:55:38,121 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e335232f7491,45543,1732467336795, seqNum=-1] 2024-11-24T16:55:38,121 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T16:55:38,122 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48288, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T16:55:38,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e335232f7491,33317,1732467336738 2024-11-24T16:55:38,124 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T16:55:38,127 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T16:55:38,128 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T16:55:38,130 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/WALs/test.com,8080,1, archiveDir=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/oldWALs, maxLogs=32 2024-11-24T16:55:38,130 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732467338130 2024-11-24T16:55:38,140 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/WALs/test.com,8080,1/test.com%2C8080%2C1.1732467338130 2024-11-24T16:55:38,147 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45823:45823),(127.0.0.1/127.0.0.1:46085:46085)] 2024-11-24T16:55:38,151 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732467338151 2024-11-24T16:55:38,164 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,164 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,164 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,164 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,164 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,164 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/WALs/test.com,8080,1/test.com%2C8080%2C1.1732467338130 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/WALs/test.com,8080,1/test.com%2C8080%2C1.1732467338151 2024-11-24T16:55:38,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46391 is added to blk_1073741835_1011 (size=93) 2024-11-24T16:55:38,167 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41413 is added to blk_1073741835_1011 (size=93) 2024-11-24T16:55:38,168 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46085:46085),(127.0.0.1/127.0.0.1:45823:45823)] 2024-11-24T16:55:38,169 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/WALs/test.com,8080,1/test.com%2C8080%2C1.1732467338130 to hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/oldWALs/test.com%2C8080%2C1.1732467338130 2024-11-24T16:55:38,169 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,169 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,169 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,169 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,169 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41413 is added to blk_1073741836_1012 (size=93) 2024-11-24T16:55:38,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46391 is added to blk_1073741836_1012 (size=93) 2024-11-24T16:55:38,174 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/oldWALs 2024-11-24T16:55:38,174 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732467338151) 2024-11-24T16:55:38,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T16:55:38,174 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
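The WAL records above report blocksize=256 MB, rollsize=128 MB and maxLogs=32 for the FSHLog created and rolled by the test. A minimal sketch of the corresponding settings follows; the key names are assumptions based on stock HBase configuration, not values read from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = 0.5 * blocksize = 128 MB
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs=32
  }
}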
2024-11-24T16:55:38,174 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:55:38,174 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:55:38,174 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:55:38,174 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
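The call stack above shows the test teardown path through HBaseTestingUtil.shutdownMiniCluster. A minimal sketch of that minicluster lifecycle is shown below; the startMiniCluster/shutdownMiniCluster calls match the names in this log, while the table and family names are illustrative assumptions.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();                      // "Minicluster is up; activeMaster=..."
    Table t = util.createTable(TableName.valueOf("sketch"), Bytes.toBytes("cf"));
    t.close();
    util.shutdownMiniCluster();                   // "Shutting down minicluster"
  }
}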
2024-11-24T16:55:38,174 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T16:55:38,174 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1033165583, stopped=false 2024-11-24T16:55:38,175 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e335232f7491,33317,1732467336738 2024-11-24T16:55:38,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T16:55:38,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T16:55:38,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:55:38,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:55:38,176 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T16:55:38,176 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T16:55:38,177 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:55:38,177 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:55:38,177 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e335232f7491,45543,1732467336795' ***** 2024-11-24T16:55:38,177 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T16:55:38,177 INFO [RS:0;e335232f7491:45543 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T16:55:38,177 INFO [RS:0;e335232f7491:45543 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T16:55:38,177 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T16:55:38,177 INFO [RS:0;e335232f7491:45543 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T16:55:38,177 INFO [RS:0;e335232f7491:45543 {}] regionserver.HRegionServer(959): stopping server e335232f7491,45543,1732467336795 2024-11-24T16:55:38,177 INFO [RS:0;e335232f7491:45543 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T16:55:38,177 INFO [RS:0;e335232f7491:45543 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e335232f7491:45543. 
2024-11-24T16:55:38,178 DEBUG [RS:0;e335232f7491:45543 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T16:55:38,178 DEBUG [RS:0;e335232f7491:45543 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:55:38,178 INFO [RS:0;e335232f7491:45543 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T16:55:38,178 INFO [RS:0;e335232f7491:45543 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T16:55:38,178 INFO [RS:0;e335232f7491:45543 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-24T16:55:38,178 INFO [RS:0;e335232f7491:45543 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T16:55:38,178 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:55:38,178 INFO [RS:0;e335232f7491:45543 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-24T16:55:38,178 DEBUG [RS:0;e335232f7491:45543 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-24T16:55:38,178 DEBUG [RS:0;e335232f7491:45543 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T16:55:38,178 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T16:55:38,179 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T16:55:38,179 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T16:55:38,179 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T16:55:38,179 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T16:55:38,179 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T16:55:38,179 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-24T16:55:38,197 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/data/hbase/meta/1588230740/.tmp/ns/638fc3c094fb4ca6943d47702055ef5c is 43, key is default/ns:d/1732467338061/Put/seqid=0 2024-11-24T16:55:38,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41413 is added to blk_1073741837_1013 (size=5153) 2024-11-24T16:55:38,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46391 is added to blk_1073741837_1013 (size=5153) 2024-11-24T16:55:38,215 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/data/hbase/meta/1588230740/.tmp/ns/638fc3c094fb4ca6943d47702055ef5c 2024-11-24T16:55:38,221 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/data/hbase/meta/1588230740/.tmp/ns/638fc3c094fb4ca6943d47702055ef5c as 
hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/data/hbase/meta/1588230740/ns/638fc3c094fb4ca6943d47702055ef5c 2024-11-24T16:55:38,227 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/data/hbase/meta/1588230740/ns/638fc3c094fb4ca6943d47702055ef5c, entries=2, sequenceid=6, filesize=5.0 K 2024-11-24T16:55:38,228 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 49ms, sequenceid=6, compaction requested=false 2024-11-24T16:55:38,228 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T16:55:38,233 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-24T16:55:38,234 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T16:55:38,234 INFO [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T16:55:38,234 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732467338179Running coprocessor pre-close hooks at 1732467338179Disabling compacts and flushes for region at 1732467338179Disabling writes for close at 1732467338179Obtaining lock to block concurrent updates at 1732467338179Preparing flush snapshotting stores in 1588230740 at 1732467338179Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732467338180 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732467338180Flushing 1588230740/ns: creating writer at 1732467338181 (+1 ms)Flushing 1588230740/ns: appending metadata at 1732467338197 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732467338197Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f5b594c: reopening flushed file at 1732467338220 (+23 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 49ms, sequenceid=6, compaction requested=false at 1732467338228 (+8 ms)Writing region close event to WAL at 1732467338229 (+1 ms)Running coprocessor post-close hooks at 1732467338234 (+5 ms)Closed at 1732467338234 2024-11-24T16:55:38,235 DEBUG [RS_CLOSE_META-regionserver/e335232f7491:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T16:55:38,379 INFO [RS:0;e335232f7491:45543 {}] regionserver.HRegionServer(976): stopping server e335232f7491,45543,1732467336795; all regions closed. 
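The records above show a memstore flush for 1588230740: the HFile is written under .tmp/ns and then committed into data/hbase/meta/1588230740/ns. A minimal sketch of the client-visible counterpart, an explicit flush through the Admin API, follows; the connection setup is illustrative, and flushing hbase:meta here is only an example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.META_TABLE_NAME); // flushes hbase:meta memstores to HFiles
    }
  }
}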
2024-11-24T16:55:38,379 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,379 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,379 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,380 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,380 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41413 is added to blk_1073741834_1010 (size=1152) 2024-11-24T16:55:38,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46391 is added to blk_1073741834_1010 (size=1152) 2024-11-24T16:55:38,385 DEBUG [RS:0;e335232f7491:45543 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/oldWALs 2024-11-24T16:55:38,385 INFO [RS:0;e335232f7491:45543 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e335232f7491%2C45543%2C1732467336795.meta:.meta(num 1732467337999) 2024-11-24T16:55:38,385 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,385 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,385 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,385 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,385 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41413 is added to blk_1073741833_1009 (size=93) 2024-11-24T16:55:38,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46391 is added to blk_1073741833_1009 (size=93) 2024-11-24T16:55:38,390 DEBUG [RS:0;e335232f7491:45543 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/oldWALs 2024-11-24T16:55:38,390 INFO [RS:0;e335232f7491:45543 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e335232f7491%2C45543%2C1732467336795:(num 1732467337606) 2024-11-24T16:55:38,390 DEBUG [RS:0;e335232f7491:45543 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T16:55:38,390 INFO [RS:0;e335232f7491:45543 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T16:55:38,390 INFO [RS:0;e335232f7491:45543 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T16:55:38,391 INFO [RS:0;e335232f7491:45543 {}] hbase.ChoreService(370): Chore service for: regionserver/e335232f7491:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-24T16:55:38,391 INFO [RS:0;e335232f7491:45543 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T16:55:38,391 INFO [regionserver/e335232f7491:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T16:55:38,391 INFO [RS:0;e335232f7491:45543 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45543 2024-11-24T16:55:38,393 INFO [RS:0;e335232f7491:45543 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T16:55:38,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e335232f7491,45543,1732467336795 2024-11-24T16:55:38,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T16:55:38,396 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e335232f7491,45543,1732467336795] 2024-11-24T16:55:38,397 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e335232f7491,45543,1732467336795 already deleted, retry=false 2024-11-24T16:55:38,397 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e335232f7491,45543,1732467336795 expired; onlineServers=0 2024-11-24T16:55:38,397 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e335232f7491,33317,1732467336738' ***** 2024-11-24T16:55:38,397 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T16:55:38,397 INFO [M:0;e335232f7491:33317 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T16:55:38,397 INFO [M:0;e335232f7491:33317 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T16:55:38,398 DEBUG [M:0;e335232f7491:33317 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T16:55:38,398 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-24T16:55:38,398 DEBUG [M:0;e335232f7491:33317 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T16:55:38,398 DEBUG [master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467337374 {}] cleaner.HFileCleaner(306): Exit Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.large.0-1732467337374,5,FailOnTimeoutGroup] 2024-11-24T16:55:38,398 INFO [M:0;e335232f7491:33317 {}] hbase.ChoreService(370): Chore service for: master/e335232f7491:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T16:55:38,398 INFO [M:0;e335232f7491:33317 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T16:55:38,398 DEBUG [M:0;e335232f7491:33317 {}] master.HMaster(1795): Stopping service threads 2024-11-24T16:55:38,398 INFO [M:0;e335232f7491:33317 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T16:55:38,398 INFO [M:0;e335232f7491:33317 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T16:55:38,398 INFO [M:0;e335232f7491:33317 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T16:55:38,398 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-24T16:55:38,398 DEBUG [master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467337374 {}] cleaner.HFileCleaner(306): Exit Thread[master/e335232f7491:0:becomeActiveMaster-HFileCleaner.small.0-1732467337374,5,FailOnTimeoutGroup] 2024-11-24T16:55:38,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T16:55:38,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T16:55:38,399 DEBUG [M:0;e335232f7491:33317 {}] zookeeper.ZKUtil(347): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T16:55:38,399 WARN [M:0;e335232f7491:33317 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T16:55:38,400 INFO [M:0;e335232f7491:33317 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/.lastflushedseqids 2024-11-24T16:55:38,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41413 is added to blk_1073741838_1014 (size=99) 2024-11-24T16:55:38,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46391 is added to blk_1073741838_1014 (size=99) 2024-11-24T16:55:38,413 INFO [M:0;e335232f7491:33317 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T16:55:38,413 INFO [M:0;e335232f7491:33317 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T16:55:38,413 DEBUG [M:0;e335232f7491:33317 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T16:55:38,414 INFO [M:0;e335232f7491:33317 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:55:38,414 DEBUG [M:0;e335232f7491:33317 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T16:55:38,414 DEBUG [M:0;e335232f7491:33317 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T16:55:38,414 DEBUG [M:0;e335232f7491:33317 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T16:55:38,414 INFO [M:0;e335232f7491:33317 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-24T16:55:38,437 DEBUG [M:0;e335232f7491:33317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a18a06af840d4cc494e3fbcf16312813 is 82, key is hbase:meta,,1/info:regioninfo/1732467338046/Put/seqid=0 2024-11-24T16:55:38,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46391 is added to blk_1073741839_1015 (size=5672) 2024-11-24T16:55:38,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41413 is added to blk_1073741839_1015 (size=5672) 2024-11-24T16:55:38,448 INFO [M:0;e335232f7491:33317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a18a06af840d4cc494e3fbcf16312813 2024-11-24T16:55:38,479 DEBUG [M:0;e335232f7491:33317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/02a4853ebce4437b871253110bfca9da is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732467338065/Put/seqid=0 2024-11-24T16:55:38,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46391 is added to blk_1073741840_1016 (size=5275) 2024-11-24T16:55:38,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41413 is added to blk_1073741840_1016 (size=5275) 2024-11-24T16:55:38,488 INFO [M:0;e335232f7491:33317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/02a4853ebce4437b871253110bfca9da 2024-11-24T16:55:38,496 INFO [RS:0;e335232f7491:45543 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T16:55:38,496 INFO [RS:0;e335232f7491:45543 {}] regionserver.HRegionServer(1031): Exiting; stopping=e335232f7491,45543,1732467336795; zookeeper connection closed. 
2024-11-24T16:55:38,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:55:38,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45543-0x1014ae790a40001, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:55:38,497 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@37040a0d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@37040a0d 2024-11-24T16:55:38,497 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T16:55:38,516 DEBUG [M:0;e335232f7491:33317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ea1e25be4be24ea8b497e1bd18187b0b is 69, key is e335232f7491,45543,1732467336795/rs:state/1732467337441/Put/seqid=0 2024-11-24T16:55:38,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46391 is added to blk_1073741841_1017 (size=5156) 2024-11-24T16:55:38,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41413 is added to blk_1073741841_1017 (size=5156) 2024-11-24T16:55:38,527 INFO [M:0;e335232f7491:33317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ea1e25be4be24ea8b497e1bd18187b0b 2024-11-24T16:55:38,550 DEBUG [M:0;e335232f7491:33317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5469c872d7c54b37a84d6fd0286170b5 is 52, key is load_balancer_on/state:d/1732467338126/Put/seqid=0 2024-11-24T16:55:38,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46391 is added to blk_1073741842_1018 (size=5056) 2024-11-24T16:55:38,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41413 is added to blk_1073741842_1018 (size=5056) 2024-11-24T16:55:38,558 INFO [M:0;e335232f7491:33317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5469c872d7c54b37a84d6fd0286170b5 2024-11-24T16:55:38,563 DEBUG [M:0;e335232f7491:33317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a18a06af840d4cc494e3fbcf16312813 as hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a18a06af840d4cc494e3fbcf16312813 2024-11-24T16:55:38,568 INFO [M:0;e335232f7491:33317 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a18a06af840d4cc494e3fbcf16312813, entries=8, sequenceid=29, filesize=5.5 K 2024-11-24T16:55:38,569 DEBUG [M:0;e335232f7491:33317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/02a4853ebce4437b871253110bfca9da as hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/02a4853ebce4437b871253110bfca9da 2024-11-24T16:55:38,573 INFO [M:0;e335232f7491:33317 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/02a4853ebce4437b871253110bfca9da, entries=3, sequenceid=29, filesize=5.2 K 2024-11-24T16:55:38,574 DEBUG [M:0;e335232f7491:33317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ea1e25be4be24ea8b497e1bd18187b0b as hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ea1e25be4be24ea8b497e1bd18187b0b 2024-11-24T16:55:38,580 INFO [M:0;e335232f7491:33317 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ea1e25be4be24ea8b497e1bd18187b0b, entries=1, sequenceid=29, filesize=5.0 K 2024-11-24T16:55:38,581 DEBUG [M:0;e335232f7491:33317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5469c872d7c54b37a84d6fd0286170b5 as hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5469c872d7c54b37a84d6fd0286170b5 2024-11-24T16:55:38,587 INFO [M:0;e335232f7491:33317 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35337/user/jenkins/test-data/b5de7b57-382b-60bf-4ebe-f6f3d99d96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5469c872d7c54b37a84d6fd0286170b5, entries=1, sequenceid=29, filesize=4.9 K 2024-11-24T16:55:38,588 INFO [M:0;e335232f7491:33317 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 174ms, sequenceid=29, compaction requested=false 2024-11-24T16:55:38,596 INFO [M:0;e335232f7491:33317 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T16:55:38,596 DEBUG [M:0;e335232f7491:33317 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732467338413Disabling compacts and flushes for region at 1732467338413Disabling writes for close at 1732467338414 (+1 ms)Obtaining lock to block concurrent updates at 1732467338414Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732467338414Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732467338414Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732467338415 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732467338415Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732467338437 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732467338437Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732467338455 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732467338479 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732467338479Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732467338494 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732467338515 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732467338515Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732467338532 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732467338549 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732467338549Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b85c028: reopening flushed file at 1732467338562 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b034d8: reopening flushed file at 1732467338568 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c72ed42: reopening flushed file at 1732467338574 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c36ba8b: reopening flushed file at 1732467338580 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 174ms, sequenceid=29, compaction requested=false at 1732467338588 (+8 ms)Writing region close event to WAL at 1732467338596 (+8 ms)Closed at 1732467338596 2024-11-24T16:55:38,597 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,597 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,597 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,597 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,597 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T16:55:38,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41413 is added to blk_1073741830_1006 (size=10311) 2024-11-24T16:55:38,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46391 is added to blk_1073741830_1006 (size=10311) 2024-11-24T16:55:38,605 INFO [M:0;e335232f7491:33317 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-24T16:55:38,605 INFO [M:0;e335232f7491:33317 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33317 2024-11-24T16:55:38,605 INFO [M:0;e335232f7491:33317 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T16:55:38,605 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T16:55:38,708 INFO [M:0;e335232f7491:33317 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T16:55:38,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:55:38,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33317-0x1014ae790a40000, quorum=127.0.0.1:49509, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T16:55:38,711 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@372f7d77{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:55:38,711 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3ffef2a8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:55:38,711 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:55:38,711 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@814e400{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:55:38,711 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@238bf9b3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/hadoop.log.dir/,STOPPED} 2024-11-24T16:55:38,713 WARN [BP-625031532-172.17.0.2-1732467335976 heartbeating to localhost/127.0.0.1:35337 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:55:38,713 WARN [BP-625031532-172.17.0.2-1732467335976 heartbeating to localhost/127.0.0.1:35337 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-625031532-172.17.0.2-1732467335976 (Datanode Uuid 0a9d6ffd-b736-4bb3-bc81-4672c793fc98) service to localhost/127.0.0.1:35337 2024-11-24T16:55:38,714 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:55:38,714 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/cluster_885baf13-47c9-7bb1-263b-860f7324909b/data/data4/current/BP-625031532-172.17.0.2-1732467335976 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:55:38,714 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/cluster_885baf13-47c9-7bb1-263b-860f7324909b/data/data3/current/BP-625031532-172.17.0.2-1732467335976 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:55:38,715 ERROR [Command processor {}] 
datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T16:55:38,715 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:55:38,720 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@483384c3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T16:55:38,721 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e70e237{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:55:38,721 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:55:38,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c8a9439{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:55:38,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24bb5ef7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/hadoop.log.dir/,STOPPED} 2024-11-24T16:55:38,722 WARN [BP-625031532-172.17.0.2-1732467335976 heartbeating to localhost/127.0.0.1:35337 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T16:55:38,722 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T16:55:38,722 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T16:55:38,722 WARN [BP-625031532-172.17.0.2-1732467335976 heartbeating to localhost/127.0.0.1:35337 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-625031532-172.17.0.2-1732467335976 (Datanode Uuid 6a18ff9c-c4bb-4f5e-8ab7-e7768707b593) service to localhost/127.0.0.1:35337 2024-11-24T16:55:38,723 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/cluster_885baf13-47c9-7bb1-263b-860f7324909b/data/data1/current/BP-625031532-172.17.0.2-1732467335976 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:55:38,723 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/cluster_885baf13-47c9-7bb1-263b-860f7324909b/data/data2/current/BP-625031532-172.17.0.2-1732467335976 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T16:55:38,724 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T16:55:38,730 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d175de5{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T16:55:38,730 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@51af64d5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T16:55:38,730 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T16:55:38,730 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32d01bcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T16:55:38,730 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d7a0e2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/73764f7d-dfb1-c116-6122-40b7eb398d2f/hadoop.log.dir/,STOPPED} 2024-11-24T16:55:38,738 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T16:55:38,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T16:55:38,769 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=269 (was 227) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35337 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) 
connection to localhost/127.0.0.1:35337 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:35337 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35337 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35337 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35337 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35337 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35337 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=535 (was 506) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=254 (was 180) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6714 (was 5408) - AvailableMemoryMB LEAK? -